/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
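
/*
 * Illustrative sketch (not part of the original file): the pfn<->mfn
 * conversion described above amounts to rewriting the PFN field of a
 * pagetable entry while preserving its flag bits, roughly:
 *
 *	pteval_t machine_pte(unsigned long pfn, pteval_t flags)
 *	{
 *		return ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
 *	}
 *
 * The real conversions are pte_pfn_to_mfn()/pte_mfn_to_pfn() further
 * down in this file.
 */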
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/module.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/linkage.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

#define MMU_UPDATE_HISTO	30

#ifdef CONFIG_XEN_DEBUG_FS

static struct {
	u32 pgd_update;
	u32 pgd_update_pinned;
	u32 pgd_update_batched;

	u32 pud_update;
	u32 pud_update_pinned;
	u32 pud_update_batched;

	u32 pmd_update;
	u32 pmd_update_pinned;
	u32 pmd_update_batched;

	u32 pte_update;
	u32 pte_update_pinned;
	u32 pte_update_batched;

	u32 mmu_update;
	u32 mmu_update_extended;
	u32 mmu_update_histo[MMU_UPDATE_HISTO];

	u32 prot_commit;
	u32 prot_commit_batched;

	u32 set_pte_at;
	u32 set_pte_at_batched;
	u32 set_pte_at_pinned;
	u32 set_pte_at_current;
	u32 set_pte_at_kernel;
} mmu_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mmu_stats, 0, sizeof(mmu_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); mmu_stats.elem += (val); } while (0)

#else /* !CONFIG_XEN_DEBUG_FS */

#define ADD_STATS(elem, val)	do { (void)(val); } while (0)

#endif /* CONFIG_XEN_DEBUG_FS */


/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate the page table pages needed to map the
 * rest.  Each page can map 2MB.
 */
static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may have been lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will be
 * self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */
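
/*
 * Illustrative use (mirroring xen_drop_mm_ref() later in this file):
 * to check whether another vcpu still has a given pagetable loaded,
 * compare its xen_current_cr3 with the pagetable's physical address:
 *
 *	if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
 *		cpumask_set_cpu(cpu, mask);
 *
 * and then force those vcpus to flush before the pagetable is torn
 * down.
 */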


/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)


#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
		{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
		{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;

static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
	__page_aligned_bss;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}

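/*
 * Worked example (illustrative only): on x86-64 with 4K pages,
 * sizeof(unsigned long) == 8, so P2M_ENTRIES_PER_PAGE == 512.  A
 * lookup of pfn 0x12345 then resolves as:
 *
 *	topidx = 0x12345 / 512 = 0x91;
 *	idx    = 0x12345 % 512 = 0x145;
 *	mfn    = p2m_top[0x91][0x145];
 *
 * Unpopulated ranges all point at the shared p2m_missing page, so
 * they read back as ~0UL (INVALID_P2M_ENTRY).
 */
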
/* Build the parallel p2m_top_mfn structures */
static void __init xen_build_mfn_list_list(void)
{
	unsigned pfn, idx;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}
}

void xen_setup_mfn_list_list(void)
{
	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}

	xen_build_mfn_list_list();
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);

/* install a new p2m_top page */
bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
{
	unsigned topidx = p2m_top_index(pfn);
	unsigned long **pfnp, *mfnp;
	unsigned i;

	pfnp = &p2m_top[topidx];
	mfnp = &p2m_top_mfn[topidx];

	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
		*mfnp = virt_to_mfn(p);
		return true;
	}

	return false;
}

static void alloc_p2m(unsigned long pfn)
{
	unsigned long *p;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	if (!install_p2mtop_page(pfn, p))
		free_page((unsigned long)p);
}

/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return true;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		if (mfn == INVALID_P2M_ENTRY)
			return true;
		return false;
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;

	return true;
}

void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		alloc_p2m(pfn);

		if (!__set_phys_to_machine(pfn, mfn))
			BUG();
	}
}
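
/*
 * Note (illustrative, not in the original file): the first
 * __set_phys_to_machine() attempt above can only fail because the
 * pfn's leaf page is still the read-only p2m_missing placeholder;
 * alloc_p2m() installs a freshly initialised page via
 * install_p2mtop_page(), after which the retry must succeed (hence
 * the BUG() if it doesn't).
 */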

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		ADD_STATS(mmu_update_extended, 1);
		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);

		mcs.mc->args[1]++;

		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
		else
			ADD_STATS(mmu_update_histo[0], 1);
	} else {
		ADD_STATS(mmu_update, 1);
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
		ADD_STATS(mmu_update_histo[1], 1);
	}

	u = mcs.args;
	*u = *update;
}

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	ADD_STATS(pmd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pmd_update_pinned, 1);

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	ADD_STATS(set_pte_at, 1);
//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
	ADD_STATS(set_pte_at_current, mm == current->mm);
	ADD_STATS(set_pte_at_kernel, mm == &init_mm);

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			ADD_STATS(set_pte_at_batched, 1);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	ADD_STATS(prot_commit, 1);
	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	return pte_mfn_to_pfn(pte.pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	ADD_STATS(pud_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pud_update_pinned, 1);

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	ADD_STATS(pte_update, 1);
//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	ADD_STATS(pgd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	ADD_STATS(pgd_update_pinned, 1);
	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole, so the check is a
	 * no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

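/*
 * Usage sketch (hypothetical callback, not part of the original
 * file): a walker callback is handed every page making up the
 * pagetable, with the pgd itself visited last, and returns nonzero
 * if the caller needs to flush once the walk completes:
 *
 *	static int nop_pt_page(struct mm_struct *mm, struct page *page,
 *			       enum pt_level level)
 *	{
 *		return 0;
 *	}
 *
 * xen_pgd_walk(mm, nop_pt_page, STACK_TOP_MAX) would then visit each
 * pud/pmd/pte page below the limit without requesting any flush.
 */
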
/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

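/*
 * Back-of-envelope for the split-lock comment in xen_pin_page()
 * above (illustrative): the preempt count field is 8 bits, so
 * nesting more than ~255 spinlocks would wrap it; taking at most one
 * pte lock per entry of a ~32-entry multicall batch keeps the
 * nesting depth well below that limit.
 */
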
/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	vm_unmap_aliases();

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must hold the lock while the pte page
		 * is unpinned but still RO, to prevent concurrent
		 * updates from seeing it in this partially-pinned
		 * state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have their %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = percpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, &mm->cpu_vm_mask);

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at its
	   actual current cr3 value, and force it to flush if needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
1222void xen_exit_mmap(struct mm_struct *mm)
1223{
1224 get_cpu(); /* make sure we don't move around */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001225 xen_drop_mm_ref(mm);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001226 put_cpu();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001227
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -07001228 spin_lock(&mm->page_table_lock);
Jeremy Fitzhardingedf912ea2007-09-25 11:50:00 -07001229
1230 /* pgd may not be pinned in the error exit path of execve */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001231 if (xen_page_pinned(mm->pgd))
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001232 xen_pgd_unpin(mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001233
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -07001234 spin_unlock(&mm->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001235}

static __init void xen_pagetable_setup_start(pgd_t *base)
{
}

static __init void xen_pagetable_setup_done(pgd_t *base)
{
	xen_setup_shared_info();
}

static void xen_write_cr2(unsigned long cr2)
{
	percpu_read(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
	return percpu_read(xen_vcpu)->arch.cr2;
}

unsigned long xen_read_cr2_direct(void)
{
	return percpu_read(xen_vcpu_info.arch.cr2);
}
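
/*
 * A note on the _direct variant (intent inferred from the naming, so
 * treat as an assumption): it reads the percpu xen_vcpu_info copy in
 * place rather than chasing the xen_vcpu pointer, which presumably
 * makes it usable from early fault paths where the extra indirection
 * is unwelcome.
 */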

static void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}
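
/*
 * The function above is the standard multicall idiom used throughout
 * this file; a minimal sketch of the flow (names are the ones used
 * here):
 *
 *	mcs = xen_mc_entry(sizeof(*op));	// reserve a batch slot
 *	op = mcs.args;				// fill in the arguments
 *	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
 *	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);	// flush unless lazy
 *
 * xen_mc_issue() only traps into Xen immediately when we're not in
 * lazy MMU mode; otherwise the op stays queued with its neighbours
 * and is submitted as one hypercall later.
 */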

static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_others(const struct cpumask *cpus,
				 struct mm_struct *mm, unsigned long va)
{
	struct {
		struct mmuext_op op;
		DECLARE_BITMAP(mask, NR_CPUS);
	} *args;
	struct multicall_space mcs;

	if (cpumask_empty(cpus))
		return;		/* nothing to do */

	mcs = xen_mc_entry(sizeof(*args));
	args = mcs.args;
	args->op.arg2.vcpumask = to_cpumask(args->mask);

	/* Remove us, and any offline CPUS. */
	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));

	if (va == TLB_FLUSH_ALL) {
		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	} else {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = va;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
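
/*
 * Layout note on the anonymous struct above: the vcpu bitmap sits
 * directly behind the mmuext_op in the same multicall argument slot,
 * and arg2.vcpumask points into it, so a single MULTI_mmuext_op entry
 * carries both the operation and the cpumask it acts on.  Sizing the
 * bitmap with NR_CPUS keeps it large enough for any possible mask.
 */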

static unsigned long xen_read_cr3(void)
{
	return percpu_read(xen_cr3);
}

static void set_current_cr3(void *v)
{
	percpu_write(xen_current_cr3, (unsigned long)v);
}

static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
	struct mmuext_op *op;
	struct multicall_space mcs;
	unsigned long mfn;

	if (cr3)
		mfn = pfn_to_mfn(PFN_DOWN(cr3));
	else
		mfn = 0;

	WARN_ON(mfn == 0 && kernel);

	mcs = __xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
	op->arg1.mfn = mfn;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	if (kernel) {
		percpu_write(xen_cr3, cr3);

		/* Update xen_current_cr3 once the batch has actually
		   been submitted. */
		xen_mc_callback(set_current_cr3, (void *)cr3);
	}
}

static void xen_write_cr3(unsigned long cr3)
{
	BUG_ON(preemptible());

	xen_mc_batch();	/* disables interrupts */

	/* Update while interrupts are disabled, so it's atomic with
	   respect to IPIs. */
	percpu_write(xen_cr3, cr3);

	__xen_write_cr3(true, cr3);

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
		if (user_pgd)
			__xen_write_cr3(false, __pa(user_pgd));
		else
			__xen_write_cr3(false, 0);
	}
#endif

	xen_mc_issue(PARAVIRT_LAZY_CPU);	/* interrupts restored */
}
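
/*
 * Worth noting about the mechanics above: both NEW_BASEPTR ops are
 * emitted inside one xen_mc_batch()/xen_mc_issue() pair, so on 64-bit
 * the kernel and user base pointers switch in a single hypercall
 * rather than two separate traps into Xen.
 */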

static int xen_pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = mm->pgd;
	int ret = 0;

	BUG_ON(PagePinned(virt_to_page(pgd)));

#ifdef CONFIG_X86_64
	{
		struct page *page = virt_to_page(pgd);
		pgd_t *user_pgd;

		BUG_ON(page->private != 0);

		ret = -ENOMEM;

		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		page->private = (unsigned long)user_pgd;

		if (user_pgd != NULL) {
			user_pgd[pgd_index(VSYSCALL_START)] =
				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
			ret = 0;
		}

		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
	}
#endif

	return ret;
}
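
/*
 * The pairing assumed here (xen_get_user_pgd() is defined earlier in
 * this file): the user pgd pointer stashed in page->private above is
 * presumably what xen_get_user_pgd() reads back, which is what lets
 * the 64-bit kernel/user pgd pair behave as a single allocation, e.g.:
 *
 *	pgd_t *user_pgd = xen_get_user_pgd(mm->pgd);
 */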

static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	if (user_pgd)
		free_page((unsigned long)user_pgd);
#endif
}

#ifdef CONFIG_HIGHPTE
static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
{
	pgprot_t prot = PAGE_KERNEL;

	if (PagePinned(page))
		prot = PAGE_KERNEL_RO;

	/* Disabled debug output; flip the 0 to trace highpte mappings. */
	if (0 && PageHighMem(page))
		printk("mapping highpte %lx type %d prot %s\n",
		       page_to_pfn(page), type,
		       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");

	return kmap_atomic_prot(page, type, prot);
}
#endif

#ifdef CONFIG_X86_32
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing present pte, only allow _PAGE_RW in
	   the new pte if the old one already had it */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}
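
/*
 * Worked example of the bit trick above (values illustrative): if the
 * old pte is present but read-only, (old & _PAGE_RW) is 0, so the
 * mask becomes ~_PAGE_RW and the new pte loses its RW bit; if the old
 * pte already had RW set, the mask is all-ones and the new pte passes
 * through unchanged.
 */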

/* Init-time set_pte used while constructing the initial pagetables;
   it doesn't allow RO pagetable pages to be remapped RW */
static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	pte = mask_rw_pte(ptep, pte);

	xen_set_pte(ptep, pte);
}
#endif

static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;
	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}
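
/*
 * Unlike the multicall-based paths above, this helper traps into Xen
 * synchronously via HYPERVISOR_mmuext_op(), which is presumably why
 * the early boot code below can use it before batching buys anything,
 * e.g.:
 *
 *	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 */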

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}

/* Used for pmd and pud */
static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Early release_pte assumes that all pte pages are pinned, since
   there's only init_mm and anything attached to that is pinned. */
static __init void xen_release_pte_init(unsigned long pfn)
{
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static __init void xen_release_pmd_init(unsigned long pfn)
{
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(virt_to_page(mm->pgd))) {
		SetPagePinned(page);

		vm_unmap_aliases();
		if (!PageHighMem(page)) {
			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
		} else {
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
		}
	}
}

static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PMD);
}

/* This should never happen until we're OK to use struct page */
static void xen_release_ptpage(unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(page)) {
		if (!PageHighMem(page)) {
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
		}
		ClearPagePinned(page);
	}
}

static void xen_release_pte(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PMD);
}

#if PAGETABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PUD);
}
#endif

void __init xen_reserve_top(void)
{
#ifdef CONFIG_X86_32
	unsigned long top = HYPERVISOR_VIRT_START;
	struct xen_platform_parameters pp;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
		top = pp.virt_start;

	reserve_top_address(-top);
#endif	/* CONFIG_X86_32 */
}

/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void *__ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
	return (void *)(paddr + __START_KERNEL_map);
#else
	return __va(paddr);
#endif
}

/* Convert a machine address to physical address */
static unsigned long m2p(phys_addr_t maddr)
{
	phys_addr_t paddr;

	maddr &= PTE_PFN_MASK;
	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

	return paddr;
}

/* Convert a machine address to kernel virtual */
static void *m2v(phys_addr_t maddr)
{
	return __ka(m2p(maddr));
}
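
/*
 * Worked example (numbers hypothetical): with PAGE_SHIFT == 12, a
 * machine address of 0x2345678 is masked down to 0x2345000, giving
 * mfn 0x2345; if the m2p table maps mfn 0x2345 to pfn 0x123, m2p()
 * returns 0x123000 and m2v() turns that into its kernel-mapping alias
 * via __ka().  PTE_PFN_MASK also strips pte flag bits, so these
 * helpers can be fed raw pagetable entries.
 */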

static void set_page_prot(void *addr, pgprot_t prot)
{
	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
	pte_t pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
		BUG();
}

static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
	unsigned pmdidx, pteidx;
	unsigned ident_pte;
	unsigned long pfn;

	ident_pte = 0;
	pfn = 0;
	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
		pte_t *pte_page;

		/* Reuse or allocate a page of ptes */
		if (pmd_present(pmd[pmdidx]))
			pte_page = m2v(pmd[pmdidx].pmd);
		else {
			/* Check for free pte pages */
			if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
				break;

			pte_page = &level1_ident_pgt[ident_pte];
			ident_pte += PTRS_PER_PTE;

			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
		}

		/* Install mappings */
		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
			pte_t pte;

			if (pfn > max_pfn_mapped)
				max_pfn_mapped = pfn;

			if (!pte_none(pte_page[pteidx]))
				continue;

			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
			pte_page[pteidx] = pte;
		}
	}

	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);

	set_page_prot(pmd, PAGE_KERNEL_RO);
}
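
/*
 * Coverage arithmetic for the loop above (illustrative): each page of
 * ptes maps PTRS_PER_PTE 4k pages (512 entries with PAE/64-bit, i.e.
 * 2MB per pte page), and level1_ident_pgt is a fixed pool of such
 * pages, so the walk stops at max_pfn or when the pool is exhausted,
 * whichever comes first.
 */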

#ifdef CONFIG_X86_64
static void convert_pfn_mfn(void *v)
{
	pte_t *pte = v;
	int i;

	/* All levels are converted the same way, so just treat them
	   as ptes. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte[i] = xen_make_pte(pte[i].pte);
}
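
/*
 * This works because pgd/pud/pmd/pte entries all keep the frame
 * number in the same bit positions, so rewriting each entry with
 * xen_make_pte() converts a whole pagetable page of any level from
 * pfns to mfns in one pass.
 */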

/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
 * means that only the kernel has a physical mapping to start with -
 * but that's enough to get __va working. We need to fill in the rest
 * of the physical mapping once some sort of allocator has been set
 * up.
 */
__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
					 unsigned long max_pfn)
{
	pud_t *l3;
	pmd_t *l2;

	/* Zap identity mapping */
	init_level4_pgt[0] = __pgd(0);

	/* Pre-constructed entries are in pfn, so convert to mfn */
	convert_pfn_mfn(init_level4_pgt);
	convert_pfn_mfn(level3_ident_pgt);
	convert_pfn_mfn(level3_kernel_pgt);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);

	memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
	memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
	memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	/* Set up identity map */
	xen_map_identity_early(level2_ident_pgt, max_pfn);

	/* Make pagetable pieces RO */
	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);

	/* Pin down new L4 */
	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
			  PFN_DOWN(__pa_symbol(init_level4_pgt)));

	/* Unpin Xen-provided one */
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	/* Switch over */
	pgd = init_level4_pgt;

	/*
	 * At this stage there can be no user pgd, and no page
	 * structure to attach it to, so make sure we just set kernel
	 * pgd.
	 */
	xen_mc_batch();
	__xen_write_cr3(true, __pa(pgd));
	xen_mc_issue(PARAVIRT_LAZY_CPU);

	reserve_early(__pa(xen_start_info->pt_base),
		      __pa(xen_start_info->pt_base +
			   xen_start_info->nr_pt_frames * PAGE_SIZE),
		      "XEN PAGETABLES");

	return pgd;
}
#else	/* !CONFIG_X86_64 */
static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;

__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
					 unsigned long max_pfn)
{
	pmd_t *kernel_pmd;

	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
				  xen_start_info->nr_pt_frames * PAGE_SIZE +
				  512*1024);

	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);

	xen_map_identity_early(level2_kernel_pgt, max_pfn);

	memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
	set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
			__pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));

	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	xen_write_cr3(__pa(swapper_pg_dir));

	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));

	reserve_early(__pa(xen_start_info->pt_base),
		      __pa(xen_start_info->pt_base +
			   xen_start_info->nr_pt_frames * PAGE_SIZE),
		      "XEN PAGETABLES");

	return swapper_pg_dir;
}
#endif	/* CONFIG_X86_64 */

static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
	pte_t pte;

	phys >>= PAGE_SHIFT;

	switch (idx) {
	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_F00F_BUG
	case FIX_F00F_IDT:
#endif
#ifdef CONFIG_X86_32
	case FIX_WP_TEST:
	case FIX_VDSO:
# ifdef CONFIG_HIGHMEM
	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
# endif
#else
	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	case FIX_APIC_BASE:	/* maps dummy local APIC */
#endif
	case FIX_TEXT_POKE0:
	case FIX_TEXT_POKE1:
		/* All local page mappings */
		pte = pfn_pte(phys, prot);
		break;

	default:
		pte = mfn_pte(phys, prot);
		break;
	}

	__native_set_fixmap(idx, pte);

#ifdef CONFIG_X86_64
	/* Replicate the change into the user pagetable's vsyscall
	   mapping. */
	if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
		unsigned long vaddr = __fix_to_virt(idx);
		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
	}
#endif
}
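
/*
 * The pfn_pte()/mfn_pte() split in the switch above is the important
 * part: local RAM fixmaps go through the normal pfn->mfn translation,
 * while the default case installs the frame number untranslated, so
 * fixmaps of machine frames handed to us by Xen (the shared info
 * page, presumably, among others) end up mapping the real machine
 * page.
 */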

__init void xen_post_allocator_init(void)
{
	pv_mmu_ops.set_pte = xen_set_pte;
	pv_mmu_ops.set_pmd = xen_set_pmd;
	pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.set_pgd = xen_set_pgd;
#endif

	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	pv_mmu_ops.alloc_pte = xen_alloc_pte;
	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
	pv_mmu_ops.release_pte = xen_release_pte;
	pv_mmu_ops.release_pmd = xen_release_pmd;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.alloc_pud = xen_alloc_pud;
	pv_mmu_ops.release_pud = xen_release_pud;
#endif

#ifdef CONFIG_X86_64
	SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
	xen_mark_init_mm_pinned();
}
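
/*
 * Boot-time vs. runtime split: xen_mmu_ops below starts out with the
 * *_init variants (xen_alloc_pte_init, xen_set_pte_init on 32-bit,
 * and so on), and the function above swaps in the struct-page-aware
 * versions once the normal allocator is usable.
 */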

const struct pv_mmu_ops xen_mmu_ops __initdata = {
	.pagetable_setup_start = xen_pagetable_setup_start,
	.pagetable_setup_done = xen_pagetable_setup_done,

	.read_cr2 = xen_read_cr2,
	.write_cr2 = xen_write_cr2,

	.read_cr3 = xen_read_cr3,
	.write_cr3 = xen_write_cr3,

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,
	.flush_tlb_others = xen_flush_tlb_others,

	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,

	.pgd_alloc = xen_pgd_alloc,
	.pgd_free = xen_pgd_free,

	.alloc_pte = xen_alloc_pte_init,
	.release_pte = xen_release_pte_init,
	.alloc_pmd = xen_alloc_pmd_init,
	.alloc_pmd_clone = paravirt_nop,
	.release_pmd = xen_release_pmd_init,

#ifdef CONFIG_HIGHPTE
	.kmap_atomic_pte = xen_kmap_atomic_pte,
#endif

#ifdef CONFIG_X86_64
	.set_pte = xen_set_pte,
#else
	.set_pte = xen_set_pte_init,
#endif
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd_hyper,

	.ptep_modify_prot_start = __ptep_modify_prot_start,
	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,
#endif	/* CONFIG_X86_PAE */
	.set_pud = xen_set_pud_hyper,

	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

#if PAGETABLE_LEVELS == 4
	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
	.set_pgd = xen_set_pgd_hyper,

	.alloc_pud = xen_alloc_pmd_init,
	.release_pud = xen_release_pmd_init,
#endif	/* PAGETABLE_LEVELS == 4 */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_mmu,
		.leave = xen_leave_lazy,
	},

	.set_fixmap = xen_set_fixmap,
};


#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_mmu_debug;

static int __init xen_mmu_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);

1974 debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
1975 debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
1976 &mmu_stats.pgd_update_pinned);
1977 debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
1978 &mmu_stats.pgd_update_pinned);
1979
1980 debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
1981 debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
1982 &mmu_stats.pud_update_pinned);
1983 debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
1984 &mmu_stats.pud_update_pinned);
1985
1986 debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
1987 debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
1988 &mmu_stats.pmd_update_pinned);
1989 debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
1990 &mmu_stats.pmd_update_pinned);
1991
1992 debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
1993// debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
1994// &mmu_stats.pte_update_pinned);
1995 debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
1996 &mmu_stats.pte_update_pinned);
1997
1998 debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
1999 debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
2000 &mmu_stats.mmu_update_extended);
2001 xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
2002 mmu_stats.mmu_update_histo, 20);
2003
2004 debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
2005 debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
2006 &mmu_stats.set_pte_at_batched);
2007 debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
2008 &mmu_stats.set_pte_at_current);
2009 debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
2010 &mmu_stats.set_pte_at_kernel);
2011
2012 debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
2013 debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
2014 &mmu_stats.prot_commit_batched);
2015
2016 return 0;
2017}
2018fs_initcall(xen_mmu_debugfs);
2019
2020#endif /* CONFIG_XEN_DEBUG_FS */