/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

#define MMU_UPDATE_HISTO	30

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and driver_pages, and
 * balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

#ifdef CONFIG_XEN_DEBUG_FS

static struct {
	u32 pgd_update;
	u32 pgd_update_pinned;
	u32 pgd_update_batched;

	u32 pud_update;
	u32 pud_update_pinned;
	u32 pud_update_batched;

	u32 pmd_update;
	u32 pmd_update_pinned;
	u32 pmd_update_batched;

	u32 pte_update;
	u32 pte_update_pinned;
	u32 pte_update_batched;

	u32 mmu_update;
	u32 mmu_update_extended;
	u32 mmu_update_histo[MMU_UPDATE_HISTO];

	u32 prot_commit;
	u32 prot_commit_batched;

	u32 set_pte_at;
	u32 set_pte_at_batched;
	u32 set_pte_at_pinned;
	u32 set_pte_at_current;
	u32 set_pte_at_kernel;
} mmu_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mmu_stats, 0, sizeof(mmu_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); mmu_stats.elem += (val); } while (0)

#else  /* !CONFIG_XEN_DEBUG_FS */

#define ADD_STATS(elem, val) do { (void)(val); } while (0)

#endif /* CONFIG_XEN_DEBUG_FS */


/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to map the page-table pages needed to map the rest.
 * Each page can map 2MB.
 */
static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */
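
/*
 * Minimal usage sketch (a hypothetical helper, not a real consumer):
 * code inspecting another vcpu's pagetable base should compare against
 * xen_current_cr3, never xen_cr3, for the reasons above.  The real
 * users of this pattern are in xen_drop_mm_ref() below.
 */
static inline bool xen_vcpu_using_pgd(unsigned cpu, pgd_t *pgd)
{
	return per_cpu(xen_current_cr3, cpu) == __pa(pgd);
}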


/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)


#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
	{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
	{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;

static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
	__page_aligned_bss;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}
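
/*
 * Worked example (assuming 64-bit with 4K pages, so
 * P2M_ENTRIES_PER_PAGE == 4096/8 == 512): pfn 0x12345 lives at
 * p2m_top[0x91][0x145], since 0x12345 / 512 == 0x91 and
 * 0x12345 % 512 == 0x145.
 */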

/* Build the parallel p2m_top_mfn structures */
void xen_build_mfn_list_list(void)
{
	unsigned pfn, idx;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}
}

void xen_setup_mfn_list_list(void)
{
	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}

	xen_build_mfn_list_list();
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
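
/*
 * Illustrative invariant check (a sketch, not part of the interface):
 * for a populated p2m entry, translating pfn -> mfn -> pfn must be the
 * identity; save/restore and the pte conversion helpers below rely on
 * this.
 */
static inline bool p2m_roundtrip_ok(unsigned long pfn)
{
	unsigned long mfn = get_phys_to_machine(pfn);

	return mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) == pfn;
}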

/* install a new p2m_top page */
bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
{
	unsigned topidx = p2m_top_index(pfn);
	unsigned long **pfnp, *mfnp;
	unsigned i;

	pfnp = &p2m_top[topidx];
	mfnp = &p2m_top_mfn[topidx];

	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
		*mfnp = virt_to_mfn(p);
		return true;
	}

	return false;
}

static void alloc_p2m(unsigned long pfn)
{
	unsigned long *p;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	if (!install_p2mtop_page(pfn, p))
		free_page((unsigned long)p);
}

/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return true;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		if (mfn == INVALID_P2M_ENTRY)
			return true;
		return false;
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;

	return true;
}

void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		alloc_p2m(pfn);

		if (!__set_phys_to_machine(pfn, mfn))
			BUG();
	}
}

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * If the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup.
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* Otherwise we have to do a (slower) full page-table walk. */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}
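
/*
 * Usage sketch (hypothetical caller): a lowmem page must be made RO
 * before Xen will accept it as a pagetable page, and RW again before
 * it is returned to normal use:
 *
 *	make_lowmem_page_readonly(pt);
 *	... page may now be validated/used as a pagetable ...
 *	make_lowmem_page_readwrite(pt);
 */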


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

static bool xen_iomap_pte(pte_t pte)
{
	return pte_flags(pte) & _PAGE_IOMAP;
}

static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = arbitrary_virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_IO);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		/* Extend an in-flight mmu_update multicall with one
		   more entry. */
		ADD_STATS(mmu_update_extended, 1);
		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);

		mcs.mc->args[1]++;

		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
		else
			ADD_STATS(mmu_update_histo[0], 1);
	} else {
		/* No multicall to extend; start a new single-entry one. */
		ADD_STATS(mmu_update, 1);
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
		ADD_STATS(mmu_update_histo[1], 1);
	}

	u = mcs.args;
	*u = *update;
}

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	ADD_STATS(pmd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pmd_update_pinned, 1);

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	if (xen_iomap_pte(pteval)) {
		xen_set_iomap_pte(ptep, pteval);
		goto out;
	}

	ADD_STATS(set_pte_at, 1);
//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
	ADD_STATS(set_pte_at_current, mm == current->mm);
	ADD_STATS(set_pte_at_kernel, mm == &init_mm);

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			ADD_STATS(set_pte_at_batched, 1);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:	return;
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	ADD_STATS(prot_commit, 1);
	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

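/*
 * Illustrative invariant (a sketch; assumes an ordinary RAM pte whose
 * frame number round-trips through the p2m, see p2m_roundtrip_ok()
 * above): the two helpers above only rewrite the PTE_PFN_MASK bits,
 * so converting pfn -> mfn -> pfn preserves the whole pte value,
 * flags included.
 */
static inline bool pteval_roundtrip_ok(pteval_t val)
{
	return pte_mfn_to_pfn(pte_pfn_to_mfn(val)) == val;
}
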
static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		/* We assume the pte frame number is an MFN, so
		   just use it as-is. */
		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	if (xen_initial_domain() && (pte.pte & _PAGE_IOMAP))
		return pte.pte;

	return pte_mfn_to_pfn(pte.pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

pte_t xen_make_pte(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);

	/*
	 * Unprivileged domains are allowed to do IOMAP mappings for
	 * PCI passthrough, but not map ISA space.  The ISA
	 * mappings are just dummy local mappings to keep other
	 * parts of the kernel happy.
	 */
	if (unlikely(pte & _PAGE_IOMAP) &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
	} else {
		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
	}

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	ADD_STATS(pud_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pud_update_pinned, 1);

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	if (xen_iomap_pte(pte)) {
		xen_set_iomap_pte(ptep, pte);
		return;
	}

	ADD_STATS(pte_update, 1);
//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();		/* write high word before low; the present
				   bit is in the low word, so the entry can't
				   become valid until the low word lands */
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	if (xen_iomap_pte(pte)) {
		xen_set_iomap_pte(ptep, pte);
		return;
	}

	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	ADD_STATS(pgd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	ADD_STATS(pgd_update_pinned, 1);
	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole, so this is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}
1214
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001215
1216#ifdef CONFIG_SMP
1217/* Another cpu may still have their %cr3 pointing at the pagetable, so
1218 we need to repoint it somewhere else before we can unpin it. */
1219static void drop_other_mm_ref(void *info)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001220{
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001221 struct mm_struct *mm = info;
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001222 struct mm_struct *active_mm;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001223
Brian Gerst9eb912d2009-01-19 00:38:57 +09001224 active_mm = percpu_read(cpu_tlbstate.active_mm);
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001225
1226 if (active_mm == mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001227 leave_mm(smp_processor_id());
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001228
1229 /* If this cpu still has a stale cr3 reference, then make sure
1230 it has been flushed. */
Jeremy Fitzhardinge7fd7d832009-02-17 23:24:03 -08001231 if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001232 load_cr3(swapper_pg_dir);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001233}
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001234
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls. In this case, we can look at its
	   actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces them to be read-only, and it controls all updates
 * to them. This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing. This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

static __init void xen_pagetable_setup_start(pgd_t *base)
{
}

static void xen_post_allocator_init(void);

static __init void xen_pagetable_setup_done(pgd_t *base)
{
	xen_setup_shared_info();
	xen_post_allocator_init();
}

static void xen_write_cr2(unsigned long cr2)
{
	percpu_read(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
	return percpu_read(xen_vcpu)->arch.cr2;
}

unsigned long xen_read_cr2_direct(void)
{
	return percpu_read(xen_vcpu_info.arch.cr2);
}

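/*
 * The TLB operations below are issued as mmuext hypercalls through
 * the multicall batcher: queue an op with xen_mc_entry(), then let
 * xen_mc_issue() either submit it immediately or leave it pending if
 * we're in lazy MMU mode. Preemption is disabled so the batch is
 * built and issued on a single cpu.
 */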
static void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_others(const struct cpumask *cpus,
				 struct mm_struct *mm, unsigned long va)
{
	struct {
		struct mmuext_op op;
		DECLARE_BITMAP(mask, NR_CPUS);
	} *args;
	struct multicall_space mcs;

	if (cpumask_empty(cpus))
		return;		/* nothing to do */

	mcs = xen_mc_entry(sizeof(*args));
	args = mcs.args;
	args->op.arg2.vcpumask = to_cpumask(args->mask);

	/* Remove us, and any offline CPUS. */
	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));

	if (va == TLB_FLUSH_ALL) {
		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	} else {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = va;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static unsigned long xen_read_cr3(void)
{
	return percpu_read(xen_cr3);
}

static void set_current_cr3(void *v)
{
	percpu_write(xen_current_cr3, (unsigned long)v);
}

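/*
 * The new cr3 is loaded via MMUEXT_NEW_BASEPTR (kernel) or, on
 * x86-64, MMUEXT_NEW_USER_BASEPTR (user). An mfn of zero is only
 * meaningful for the user baseptr, where it means "no user
 * pagetable".
 */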
static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
	struct mmuext_op *op;
	struct multicall_space mcs;
	unsigned long mfn;

	if (cr3)
		mfn = pfn_to_mfn(PFN_DOWN(cr3));
	else
		mfn = 0;

	WARN_ON(mfn == 0 && kernel);

	mcs = __xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
	op->arg1.mfn = mfn;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	if (kernel) {
		percpu_write(xen_cr3, cr3);

		/* Update xen_current_cr3 once the batch has actually
		   been submitted. */
		xen_mc_callback(set_current_cr3, (void *)cr3);
	}
}

static void xen_write_cr3(unsigned long cr3)
{
	BUG_ON(preemptible());

	xen_mc_batch();	/* disables interrupts */

	/* Update while interrupts are disabled, so it's atomic with
	   respect to IPIs */
	percpu_write(xen_cr3, cr3);

	__xen_write_cr3(true, cr3);

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
		if (user_pgd)
			__xen_write_cr3(false, __pa(user_pgd));
		else
			__xen_write_cr3(false, 0);
	}
#endif

	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
}

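/*
 * On x86-64 Xen runs the kernel and userspace on separate baseptrs,
 * so each pgd gets a shadow "user" pgd whose address is stashed in
 * the pgd page's page->private. It only needs the vsyscall mapping,
 * which is installed here at allocation time.
 */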
static int xen_pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = mm->pgd;
	int ret = 0;

	BUG_ON(PagePinned(virt_to_page(pgd)));

#ifdef CONFIG_X86_64
	{
		struct page *page = virt_to_page(pgd);
		pgd_t *user_pgd;

		BUG_ON(page->private != 0);

		ret = -ENOMEM;

		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		page->private = (unsigned long)user_pgd;

		if (user_pgd != NULL) {
			user_pgd[pgd_index(VSYSCALL_START)] =
				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
			ret = 0;
		}

		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
	}
#endif

	return ret;
}

static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	if (user_pgd)
		free_page((unsigned long)user_pgd);
#endif
}

#ifdef CONFIG_X86_32
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing pte, then only allow _PAGE_RW in the
	   new pte if the old one already had it: the mask keeps every
	   bit of the new value except _PAGE_RW, which is taken from
	   the old value. */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}

/* Init-time set_pte while constructing initial pagetables, which
   doesn't allow RO pagetable pages to be remapped RW */
static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	pte = mask_rw_pte(ptep, pte);

	xen_set_pte(ptep, pte);
}
#endif

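/*
 * Pin or unpin a single pagetable frame with a synchronous mmuext
 * hypercall, issued immediately rather than through the multicall
 * batcher.
 */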
static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;
	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}

/* Used for pmd and pud */
static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Early release_pte assumes that all pte pages are pinned, since
   there's only init_mm and anything attached to that is pinned. */
static __init void xen_release_pte_init(unsigned long pfn)
{
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static __init void xen_release_pmd_init(unsigned long pfn)
{
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

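/*
 * Once the normal allocators are running, pte pages are pinned
 * lazily: only when they get attached to an already-pinned
 * pagetable. Highmem pages can't be marked read-only through the
 * linear mapping, so stray kmap aliases are flushed instead.
 */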
/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(virt_to_page(mm->pgd))) {
		SetPagePinned(page);

		if (!PageHighMem(page)) {
			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
		} else {
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
		}
	}
}

static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PMD);
}

/* This should never be called until we're OK to use struct page */
static void xen_release_ptpage(unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(page)) {
		if (!PageHighMem(page)) {
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
		}
		ClearPagePinned(page);
	}
}

static void xen_release_pte(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PMD);
}

#if PAGETABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PUD);
}
#endif

void __init xen_reserve_top(void)
{
#ifdef CONFIG_X86_32
	unsigned long top = HYPERVISOR_VIRT_START;
	struct xen_platform_parameters pp;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
		top = pp.virt_start;

	reserve_top_address(-top);
#endif	/* CONFIG_X86_32 */
}

/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void *__ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
	return (void *)(paddr + __START_KERNEL_map);
#else
	return __va(paddr);
#endif
}

/* Convert a machine address to physical address */
static unsigned long m2p(phys_addr_t maddr)
{
	phys_addr_t paddr;

	maddr &= PTE_PFN_MASK;
	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

	return paddr;
}

/* Convert a machine address to kernel virtual */
static void *m2v(phys_addr_t maddr)
{
	return __ka(m2p(maddr));
}

static void set_page_prot(void *addr, pgprot_t prot)
{
	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
	pte_t pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
		BUG();
}

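/*
 * Build a pfn==vaddr "identity" mapping of the first max_pfn pages
 * under the given pmd, reusing any pte pages Xen already installed
 * and drawing new ones from the static level1_ident_pgt pool until
 * it runs out.
 */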
static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
	unsigned pmdidx, pteidx;
	unsigned ident_pte;
	unsigned long pfn;

	ident_pte = 0;
	pfn = 0;
	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
		pte_t *pte_page;

		/* Reuse or allocate a page of ptes */
		if (pmd_present(pmd[pmdidx]))
			pte_page = m2v(pmd[pmdidx].pmd);
		else {
			/* Check for free pte pages */
			if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
				break;

			pte_page = &level1_ident_pgt[ident_pte];
			ident_pte += PTRS_PER_PTE;

			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
		}

		/* Install mappings */
		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
			pte_t pte;

			if (pfn > max_pfn_mapped)
				max_pfn_mapped = pfn;

			if (!pte_none(pte_page[pteidx]))
				continue;

			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
			pte_page[pteidx] = pte;
		}
	}

	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);

	set_page_prot(pmd, PAGE_KERNEL_RO);
}

#ifdef CONFIG_X86_64
static void convert_pfn_mfn(void *v)
{
	pte_t *pte = v;
	int i;

	/* All levels are converted the same way, so just treat them
	   as ptes. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte[i] = xen_make_pte(pte[i].pte);
}

/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen-provided pagetable into
 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
 * means that only the kernel has a physical mapping to start with -
 * but that's enough to get __va working. We need to fill in the rest
 * of the physical mapping once some sort of allocator has been set
 * up.
 */
__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
					 unsigned long max_pfn)
{
	pud_t *l3;
	pmd_t *l2;

	/* Zap identity mapping */
	init_level4_pgt[0] = __pgd(0);

	/* Pre-constructed entries are in pfn, so convert to mfn */
	convert_pfn_mfn(init_level4_pgt);
	convert_pfn_mfn(level3_ident_pgt);
	convert_pfn_mfn(level3_kernel_pgt);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);

	memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
	memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
	memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	/* Set up identity map */
	xen_map_identity_early(level2_ident_pgt, max_pfn);

	/* Make pagetable pieces RO */
	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);

	/* Pin down new L4 */
	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
			  PFN_DOWN(__pa_symbol(init_level4_pgt)));

	/* Unpin Xen-provided one */
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	/* Switch over */
	pgd = init_level4_pgt;

	/*
	 * At this stage there can be no user pgd, and no page
	 * structure to attach it to, so make sure we just set kernel
	 * pgd.
	 */
	xen_mc_batch();
	__xen_write_cr3(true, __pa(pgd));
	xen_mc_issue(PARAVIRT_LAZY_CPU);

	memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
				   __pa(xen_start_info->pt_base +
					xen_start_info->nr_pt_frames * PAGE_SIZE),
				   "XEN PAGETABLES");

	return pgd;
}
#else	/* !CONFIG_X86_64 */
static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;

__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
					 unsigned long max_pfn)
{
	pmd_t *kernel_pmd;

	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
				  xen_start_info->nr_pt_frames * PAGE_SIZE +
				  512*1024);

	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);

	xen_map_identity_early(level2_kernel_pgt, max_pfn);

	memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
	set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
			__pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));

	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	xen_write_cr3(__pa(swapper_pg_dir));

	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));

	memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
				   __pa(xen_start_info->pt_base +
					xen_start_info->nr_pt_frames * PAGE_SIZE),
				   "XEN PAGETABLES");

	return swapper_pg_dir;
}
#endif	/* CONFIG_X86_64 */

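/*
 * Fixmap entries fall into three groups: ordinary local pages, which
 * are mapped by pfn; the paravirt bootmap, which is already a
 * machine frame but not an I/O mapping; and hardware mappings, which
 * take the mfn as-is and get _PAGE_IOMAP.
 */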
static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
	pte_t pte;

	phys >>= PAGE_SHIFT;

	switch (idx) {
	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_F00F_BUG
	case FIX_F00F_IDT:
#endif
#ifdef CONFIG_X86_32
	case FIX_WP_TEST:
	case FIX_VDSO:
# ifdef CONFIG_HIGHMEM
	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
# endif
#else
	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	case FIX_APIC_BASE:	/* maps dummy local APIC */
#endif
	case FIX_TEXT_POKE0:
	case FIX_TEXT_POKE1:
		/* All local page mappings */
		pte = pfn_pte(phys, prot);
		break;

	case FIX_PARAVIRT_BOOTMAP:
		/* This is an MFN, but it isn't an IO mapping from the
		   IO domain */
		pte = mfn_pte(phys, prot);
		break;

	default:
		/* By default, set_fixmap is used for hardware mappings */
		pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
		break;
	}

	__native_set_fixmap(idx, pte);

#ifdef CONFIG_X86_64
	/* Replicate changes to map the vsyscall page into the user
	   pagetable vsyscall mapping. */
	if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
		unsigned long vaddr = __fix_to_virt(idx);
		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
	}
#endif
}

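/*
 * Once the kernel's real page allocator is up, swap the early
 * "everything is pinned" hooks for the final ones, which track
 * pinning per page.
 */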
static __init void xen_post_allocator_init(void)
{
	pv_mmu_ops.set_pte = xen_set_pte;
	pv_mmu_ops.set_pmd = xen_set_pmd;
	pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.set_pgd = xen_set_pgd;
#endif

	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	pv_mmu_ops.alloc_pte = xen_alloc_pte;
	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
	pv_mmu_ops.release_pte = xen_release_pte;
	pv_mmu_ops.release_pmd = xen_release_pmd;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.alloc_pud = xen_alloc_pud;
	pv_mmu_ops.release_pud = xen_release_pud;
#endif

#ifdef CONFIG_X86_64
	SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
	xen_mark_init_mm_pinned();
}

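/*
 * Leaving lazy MMU mode must flush any hypercalls still sitting in
 * the multicall batch, so their effects are visible before normal
 * (non-lazy) operation resumes.
 */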
static void xen_leave_lazy_mmu(void)
{
	preempt_disable();
	xen_mc_flush();
	paravirt_leave_lazy_mmu();
	preempt_enable();
}

static const struct pv_mmu_ops xen_mmu_ops __initdata = {
	.read_cr2 = xen_read_cr2,
	.write_cr2 = xen_write_cr2,

	.read_cr3 = xen_read_cr3,
	.write_cr3 = xen_write_cr3,

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,
	.flush_tlb_others = xen_flush_tlb_others,

	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,

	.pgd_alloc = xen_pgd_alloc,
	.pgd_free = xen_pgd_free,

	.alloc_pte = xen_alloc_pte_init,
	.release_pte = xen_release_pte_init,
	.alloc_pmd = xen_alloc_pmd_init,
	.alloc_pmd_clone = paravirt_nop,
	.release_pmd = xen_release_pmd_init,

#ifdef CONFIG_X86_64
	.set_pte = xen_set_pte,
#else
	.set_pte = xen_set_pte_init,
#endif
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd_hyper,

	.ptep_modify_prot_start = __ptep_modify_prot_start,
	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,
#endif	/* CONFIG_X86_PAE */
	.set_pud = xen_set_pud_hyper,

	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

#if PAGETABLE_LEVELS == 4
	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
	.set_pgd = xen_set_pgd_hyper,

	.alloc_pud = xen_alloc_pmd_init,
	.release_pud = xen_release_pmd_init,
#endif	/* PAGETABLE_LEVELS == 4 */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_mmu,
		.leave = xen_leave_lazy_mmu,
	},

	.set_fixmap = xen_set_fixmap,
};

void __init xen_init_mmu_ops(void)
{
	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
	pv_mmu_ops = xen_mmu_ops;

	vmap_lazy_unmap = false;
}

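/*
 * Machinery for exchanging a range of the guest's pseudo-physical
 * pages between discontiguous and physically contiguous machine
 * frames: zap the PTEs, swap the underlying MFNs with the
 * hypervisor, then remap and update the p2m. All of it is serialised
 * by xen_reservation_lock.
 */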
/* Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER 9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];

#define VOID_PTE (mfn_pte(0, __pgprot(0)))
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
				unsigned long *in_frames,
				unsigned long *out_frames)
{
	int i;
	struct multicall_space mcs;

	xen_mc_batch();
	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
		mcs = __xen_mc_entry(0);

		if (in_frames)
			in_frames[i] = virt_to_mfn(vaddr);

		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
		set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);

		if (out_frames)
			out_frames[i] = virt_to_pfn(vaddr);
	}
	xen_mc_issue(0);
}

/*
 * Update the pfn-to-mfn mappings for a virtual address range, either to
 * point to an array of mfns, or contiguously from a single starting
 * mfn.
 */
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
				     unsigned long *mfns,
				     unsigned long first_mfn)
{
	unsigned i, limit;
	unsigned long mfn;

	xen_mc_batch();

	limit = 1u << order;
	for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
		struct multicall_space mcs;
		unsigned flags;

		mcs = __xen_mc_entry(0);
		if (mfns)
			mfn = mfns[i];
		else
			mfn = first_mfn + i;

		if (i < (limit - 1))
			flags = 0;
		else {
			if (order == 0)
				flags = UVMF_INVLPG | UVMF_ALL;
			else
				flags = UVMF_TLB_FLUSH | UVMF_ALL;
		}

		MULTI_update_va_mapping(mcs.mc, vaddr,
				mfn_pte(mfn, PAGE_KERNEL), flags);

		set_phys_to_machine(virt_to_pfn(vaddr), mfn);
	}

	xen_mc_issue(0);
}

/*
 * Perform the hypercall to exchange a region of our pfns to point to
 * memory with the required contiguous alignment. Takes the pfns as
 * input, and populates mfns as output.
 *
 * Returns a success code indicating whether the hypervisor was able to
 * satisfy the request or not.
 */
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
			       unsigned long *pfns_in,
			       unsigned long extents_out,
			       unsigned int order_out,
			       unsigned long *mfns_out,
			       unsigned int address_bits)
{
	long rc;
	int success;

	struct xen_memory_exchange exchange = {
		.in = {
			.nr_extents = extents_in,
			.extent_order = order_in,
			.extent_start = pfns_in,
			.domid = DOMID_SELF
		},
		.out = {
			.nr_extents = extents_out,
			.extent_order = order_out,
			.extent_start = mfns_out,
			.address_bits = address_bits,
			.domid = DOMID_SELF
		}
	};

	BUG_ON(extents_in << order_in != extents_out << order_out);

	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
	success = (exchange.nr_exchanged == extents_in);

	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
	BUG_ON(success && (rc != 0));

	return success;
}

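/*
 * As a sketch of a hypothetical caller (not taken from this file): a
 * driver needing a 2MB physically contiguous, 32-bit-addressable
 * buffer might do
 *
 *	buf = __get_free_pages(GFP_KERNEL, 9);
 *	rc = xen_create_contiguous_region(buf, 9, 32);
 *
 * and later undo it with xen_destroy_contiguous_region(buf, 9)
 * before freeing the pages.
 */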
int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
				 unsigned int address_bits)
{
	unsigned long *in_frames = discontig_frames, out_frame;
	unsigned long flags;
	int success;

	/*
	 * Currently an auto-translated guest will not perform I/O, nor will
	 * it require PAE page directories below 4GB. Therefore any calls to
	 * this function are redundant and can be ignored.
	 */

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return -ENOMEM;

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Zap current PTEs, remembering MFNs. */
	xen_zap_pfn_range(vstart, order, in_frames, NULL);

	/* 2. Get a new contiguous memory extent. */
	out_frame = virt_to_pfn(vstart);
	success = xen_exchange_memory(1UL << order, 0, in_frames,
				      1, order, &out_frame,
				      address_bits);

	/* 3. Map the new extent in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
	else
		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	return success ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
{
	unsigned long *out_frames = discontig_frames, in_frame;
	unsigned long flags;
	int success;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return;

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Find start MFN of contiguous extent. */
	in_frame = virt_to_mfn(vstart);

	/* 2. Zap current PTEs. */
	xen_zap_pfn_range(vstart, order, NULL, out_frames);

	/* 3. Do the exchange for non-contiguous MFNs. */
	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
				      0, out_frames, 0);

	/* 4. Map new pages in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
	else
		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

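/*
 * A PV-on-HVM guest has no pinning to undo. Instead, the
 * HVMOP_pagetable_dying hypercall tells the hypervisor that a
 * pagetable is being torn down, so it can discard any shadow
 * pagetable state for it rather than tracking the teardown.
 */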
#ifdef CONFIG_XEN_PVHVM
static void xen_hvm_exit_mmap(struct mm_struct *mm)
{
	struct xen_hvm_pagetable_dying a;
	int rc;

	a.domid = DOMID_SELF;
	a.gpa = __pa(mm->pgd);
	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
	WARN_ON_ONCE(rc < 0);
}

static int is_pagetable_dying_supported(void)
{
	struct xen_hvm_pagetable_dying a;
	int rc = 0;

	a.domid = DOMID_SELF;
	a.gpa = 0x00;
	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
	if (rc < 0) {
		printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
		return 0;
	}
	return 1;
}

void __init xen_hvm_init_mmu_ops(void)
{
	if (is_pagetable_dying_supported())
		pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
}
#endif

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_mmu_debug;

static int __init xen_mmu_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);

	debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
	debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_pinned);
	debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_batched);

	debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
	debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_pinned);
	debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_batched);

	debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
	debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_pinned);
	debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_batched);

	debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
//	debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
//			   &mmu_stats.pte_update_pinned);
	debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pte_update_batched);

	debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
	debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
			   &mmu_stats.mmu_update_extended);
	xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
				     mmu_stats.mmu_update_histo, 20);

	debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
	debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_batched);
	debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_current);
	debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_kernel);

	debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
	debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
			   &mmu_stats.prot_commit_batched);

	return 0;
}
fs_initcall(xen_mmu_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */