/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/gfp.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

#define MMU_UPDATE_HISTO	30

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and driver_pages, and
 * balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

#ifdef CONFIG_XEN_DEBUG_FS

static struct {
	u32 pgd_update;
	u32 pgd_update_pinned;
	u32 pgd_update_batched;

	u32 pud_update;
	u32 pud_update_pinned;
	u32 pud_update_batched;

	u32 pmd_update;
	u32 pmd_update_pinned;
	u32 pmd_update_batched;

	u32 pte_update;
	u32 pte_update_pinned;
	u32 pte_update_batched;

	u32 mmu_update;
	u32 mmu_update_extended;
	u32 mmu_update_histo[MMU_UPDATE_HISTO];

	u32 prot_commit;
	u32 prot_commit_batched;

	u32 set_pte_at;
	u32 set_pte_at_batched;
	u32 set_pte_at_pinned;
	u32 set_pte_at_current;
	u32 set_pte_at_kernel;
} mmu_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mmu_stats, 0, sizeof(mmu_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); mmu_stats.elem += (val); } while(0)

#else  /* !CONFIG_XEN_DEBUG_FS */

#define ADD_STATS(elem, val) do { (void)(val); } while(0)

#endif /* CONFIG_XEN_DEBUG_FS */


/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate the page table pages needed to map the rest.
 * Each page can map 2MB.
 */
static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		/* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	/* actual vcpu cr3 */


/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)


#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
	{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
	{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;

static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
	__page_aligned_bss;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}
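
/*
 * Worked example of the two-level p2m lookup (numbers assume a 64-bit
 * build, where P2M_ENTRIES_PER_PAGE = 4096/8 = 512): pfn 74565 is found
 * at p2m_top[74565 / 512][74565 % 512] == p2m_top[145][325].  Top-level
 * slots for unpopulated ranges all point at the shared p2m_missing page.
 */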

/* Build the parallel p2m_top_mfn structures */
void xen_build_mfn_list_list(void)
{
	unsigned pfn, idx;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}
}

void xen_setup_mfn_list_list(void)
{
	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}

	xen_build_mfn_list_list();
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
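
/*
 * Note that a lookup which lands in an unpopulated top-level slot reads
 * from p2m_missing, whose entries are pre-filled with ~0UL, i.e.
 * INVALID_P2M_ENTRY, so holes come back as "invalid" without any
 * explicit check in get_phys_to_machine().
 */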

/* install a new p2m_top page */
bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
{
	unsigned topidx = p2m_top_index(pfn);
	unsigned long **pfnp, *mfnp;
	unsigned i;

	pfnp = &p2m_top[topidx];
	mfnp = &p2m_top_mfn[topidx];

	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
		*mfnp = virt_to_mfn(p);
		return true;
	}

	return false;
}

static void alloc_p2m(unsigned long pfn)
{
	unsigned long *p;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	if (!install_p2mtop_page(pfn, p))
		free_page((unsigned long)p);
}
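
/*
 * The cmpxchg() in install_p2mtop_page() is what makes this safe against
 * concurrent allocation: if two CPUs race to populate the same top-level
 * slot, exactly one swap succeeds, and the loser's page is freed above.
 */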

/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return true;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		if (mfn == INVALID_P2M_ENTRY)
			return true;
		return false;
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;

	return true;
}

void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		alloc_p2m(pfn);

		if (!__set_phys_to_machine(pfn, mfn))
			BUG();
	}
}
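
/*
 * To summarise the update path: the fast path writes the entry in place,
 * and only when the top-level slot still points at p2m_missing do we
 * allocate a fresh page and retry.  Auto-translated guests keep an
 * identity p2m, so there is nothing to store for them.
 */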

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

static bool xen_iomap_pte(pte_t pte)
{
	return pte_flags(pte) & _PAGE_IOMAP;
}

static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = arbitrary_virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_IO);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
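
/*
 * Note the DOMID_IO above: it tells Xen to validate the target frame as
 * I/O memory rather than as a page owned by this domain.
 */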

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		ADD_STATS(mmu_update_extended, 1);
		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);

		mcs.mc->args[1]++;

		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
		else
			ADD_STATS(mmu_update_histo[0], 1);
	} else {
		ADD_STATS(mmu_update, 1);
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
		ADD_STATS(mmu_update_histo[1], 1);
	}

	u = mcs.args;
	*u = *update;
}

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	ADD_STATS(pmd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pmd_update_pinned, 1);

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	if (xen_iomap_pte(pteval)) {
		xen_set_iomap_pte(ptep, pteval);
		goto out;
	}

	ADD_STATS(set_pte_at, 1);
//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
	ADD_STATS(set_pte_at_current, mm == current->mm);
	ADD_STATS(set_pte_at_kernel, mm == &init_mm);

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			ADD_STATS(set_pte_at_batched, 1);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:	return;
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	ADD_STATS(prot_commit, 1);
	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}
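
/*
 * Both converters rely on the same split: PTE_PFN_MASK selects the frame
 * number bits of the entry and PTE_FLAGS_MASK everything else, so each
 * conversion is "translate the frame number through the p2m/m2p, keep
 * the flags".  E.g. a present entry for pfn 5 with flags F goes into the
 * pagetable as ((pteval_t)pfn_to_mfn(5) << PAGE_SHIFT) | F.
 */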

static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		/* We assume the pte frame number is a MFN, so
		   just use it as-is. */
		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	if (xen_initial_domain() && (pte.pte & _PAGE_IOMAP))
		return pte.pte;

	return pte_mfn_to_pfn(pte.pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

pte_t xen_make_pte(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);

	/*
	 * Unprivileged domains are allowed to do IOMAPpings for
	 * PCI passthrough, but not map ISA space.  The ISA
	 * mappings are just dummy local mappings to keep other
	 * parts of the kernel happy.
	 */
	if (unlikely(pte & _PAGE_IOMAP) &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
	} else {
		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
	}

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	ADD_STATS(pud_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pud_update_pinned, 1);

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	if (xen_iomap_pte(pte)) {
		xen_set_iomap_pte(ptep, pte);
		return;
	}

	ADD_STATS(pte_update, 1);
//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	if (xen_iomap_pte(pte)) {
		xen_set_iomap_pte(ptep, pte);
		return;
	}

	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}
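
/*
 * On 64-bit, each process has separate kernel and user top-level
 * pagetables; the user pgd page is remembered in the kernel pgd's
 * struct page ->private field, which is what the lookup above decodes.
 */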

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	ADD_STATS(pgd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	ADD_STATS(pgd_update_pinned, 1);
	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole and so is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

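/* Queue a pin/unpin mmuext op for the frame backing pfn into the
   current multicall batch; it takes effect when the batch is issued. */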
static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	vm_unmap_aliases();

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}

Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001213
1214#ifdef CONFIG_SMP
1215/* Another cpu may still have their %cr3 pointing at the pagetable, so
1216 we need to repoint it somewhere else before we can unpin it. */
1217static void drop_other_mm_ref(void *info)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001218{
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001219 struct mm_struct *mm = info;
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001220 struct mm_struct *active_mm;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001221
Brian Gerst9eb912d2009-01-19 00:38:57 +09001222 active_mm = percpu_read(cpu_tlbstate.active_mm);
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001223
1224 if (active_mm == mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001225 leave_mm(smp_processor_id());
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001226
1227 /* If this cpu still has a stale cr3 reference, then make sure
1228 it has been flushed. */
Jeremy Fitzhardinge7fd7d832009-02-17 23:24:03 -08001229 if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001230 load_cr3(swapper_pg_dir);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001231}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at its
	   actual current cr3 value, and force it to flush if needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

static __init void xen_pagetable_setup_start(pgd_t *base)
{
}

static void xen_post_allocator_init(void);

static __init void xen_pagetable_setup_done(pgd_t *base)
{
	xen_setup_shared_info();
	xen_post_allocator_init();
}

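/*
 * The hypervisor reports page-fault addresses through the per-vcpu
 * info structure rather than leaving them in %cr2, so cr2 reads and
 * writes are redirected there.
 */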
static void xen_write_cr2(unsigned long cr2)
{
	percpu_read(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
	return percpu_read(xen_vcpu)->arch.cr2;
}

unsigned long xen_read_cr2_direct(void)
{
	return percpu_read(xen_vcpu_info.arch.cr2);
}

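/*
 * TLB flushes are issued as mmuext hypercalls through the multicall
 * machinery, so they can be batched with any other pending pagetable
 * updates instead of trapping into the hypervisor immediately.
 */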
static void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

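/*
 * Remote TLB flushes hand the whole cpumask to the hypervisor in a
 * single MMUEXT_TLB_FLUSH_MULTI/MMUEXT_INVLPG_MULTI operation,
 * instead of sending an IPI per target CPU.
 */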
static void xen_flush_tlb_others(const struct cpumask *cpus,
				 struct mm_struct *mm, unsigned long va)
{
	struct {
		struct mmuext_op op;
		DECLARE_BITMAP(mask, NR_CPUS);
	} *args;
	struct multicall_space mcs;

	if (cpumask_empty(cpus))
		return;		/* nothing to do */

	mcs = xen_mc_entry(sizeof(*args));
	args = mcs.args;
	args->op.arg2.vcpumask = to_cpumask(args->mask);

	/* Remove us, and any offline CPUs. */
	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));

	if (va == TLB_FLUSH_ALL) {
		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	} else {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = va;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

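/*
 * Two per-cpu cr3 values are tracked: xen_cr3 is updated as soon as a
 * new cr3 is written, while xen_current_cr3 is only updated once the
 * batched MMUEXT_NEW_BASEPTR call has actually been submitted, so
 * other CPUs can tell whether a cr3 change is still pending.
 */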
static unsigned long xen_read_cr3(void)
{
	return percpu_read(xen_cr3);
}

static void set_current_cr3(void *v)
{
	percpu_write(xen_current_cr3, (unsigned long)v);
}

static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
	struct mmuext_op *op;
	struct multicall_space mcs;
	unsigned long mfn;

	if (cr3)
		mfn = pfn_to_mfn(PFN_DOWN(cr3));
	else
		mfn = 0;

	WARN_ON(mfn == 0 && kernel);

	mcs = __xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
	op->arg1.mfn = mfn;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	if (kernel) {
		percpu_write(xen_cr3, cr3);

		/* Update xen_current_cr3 once the batch has actually
		   been submitted. */
		xen_mc_callback(set_current_cr3, (void *)cr3);
	}
}

static void xen_write_cr3(unsigned long cr3)
{
	BUG_ON(preemptible());

	xen_mc_batch();		/* disables interrupts */

	/* Update while interrupts are disabled, so it's atomic with
	   respect to IPIs */
	percpu_write(xen_cr3, cr3);

	__xen_write_cr3(true, cr3);

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
		if (user_pgd)
			__xen_write_cr3(false, __pa(user_pgd));
		else
			__xen_write_cr3(false, 0);
	}
#endif

	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
}

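/*
 * On 64-bit, each pagetable has a second, user pgd for the user half
 * of the address space; it is kept in the pgd page's ->private field
 * and is allocated and freed together with the kernel pgd.
 */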
static int xen_pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = mm->pgd;
	int ret = 0;

	BUG_ON(PagePinned(virt_to_page(pgd)));

#ifdef CONFIG_X86_64
	{
		struct page *page = virt_to_page(pgd);
		pgd_t *user_pgd;

		BUG_ON(page->private != 0);

		ret = -ENOMEM;

		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		page->private = (unsigned long)user_pgd;

		if (user_pgd != NULL) {
			user_pgd[pgd_index(VSYSCALL_START)] =
				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
			ret = 0;
		}

		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
	}
#endif

	return ret;
}

static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	if (user_pgd)
		free_page((unsigned long)user_pgd);
#endif
}

#ifdef CONFIG_X86_32
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}

/* Init-time set_pte while constructing initial pagetables, which
   doesn't allow RO pagetable pages to be remapped RW */
static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	pte = mask_rw_pte(ptep, pte);

	xen_set_pte(ptep, pte);
}
#endif

static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;
	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}

/* Used for pmd and pud */
static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Early release_pte assumes that all pts are pinned, since there's
   only init_mm and anything attached to that is pinned. */
static __init void xen_release_pte_init(unsigned long pfn)
{
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static __init void xen_release_pmd_init(unsigned long pfn)
{
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(virt_to_page(mm->pgd))) {
		SetPagePinned(page);

		vm_unmap_aliases();
		if (!PageHighMem(page)) {
			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
		} else {
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
		}
	}
}

static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PMD);
}

/* This should never happen until we're OK to use struct page */
static void xen_release_ptpage(unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(page)) {
		if (!PageHighMem(page)) {
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
		}
		ClearPagePinned(page);
	}
}

static void xen_release_pte(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PMD);
}

#if PAGETABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PUD);
}
#endif

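/*
 * On 32-bit, the hypervisor lives at the top of the virtual address
 * space; ask Xen where its area begins and reserve everything above
 * it, so the fixmap and kernel mappings stay below it.
 */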
void __init xen_reserve_top(void)
{
#ifdef CONFIG_X86_32
	unsigned long top = HYPERVISOR_VIRT_START;
	struct xen_platform_parameters pp;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
		top = pp.virt_start;

	reserve_top_address(-top);
#endif	/* CONFIG_X86_32 */
}

/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void *__ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
	return (void *)(paddr + __START_KERNEL_map);
#else
	return __va(paddr);
#endif
}

/* Convert a machine address to physical address */
static unsigned long m2p(phys_addr_t maddr)
{
	phys_addr_t paddr;

	maddr &= PTE_PFN_MASK;
	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

	return paddr;
}

/* Convert a machine address to kernel virtual */
static void *m2v(phys_addr_t maddr)
{
	return __ka(m2p(maddr));
}

static void set_page_prot(void *addr, pgprot_t prot)
{
	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
	pte_t pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
		BUG();
}

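/*
 * Build an identity mapping of the first max_pfn pages under the
 * given pmd, reusing any pte pages the domain builder has already
 * installed and taking the rest from level1_ident_pgt; every pte
 * page used ends up read-only so the pagetable can be pinned.
 */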
static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
	unsigned pmdidx, pteidx;
	unsigned ident_pte;
	unsigned long pfn;

	ident_pte = 0;
	pfn = 0;
	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
		pte_t *pte_page;

		/* Reuse or allocate a page of ptes */
		if (pmd_present(pmd[pmdidx]))
			pte_page = m2v(pmd[pmdidx].pmd);
		else {
			/* Check for free pte pages */
			if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
				break;

			pte_page = &level1_ident_pgt[ident_pte];
			ident_pte += PTRS_PER_PTE;

			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
		}

		/* Install mappings */
		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
			pte_t pte;

			if (pfn > max_pfn_mapped)
				max_pfn_mapped = pfn;

			if (!pte_none(pte_page[pteidx]))
				continue;

			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
			pte_page[pteidx] = pte;
		}
	}

	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);

	set_page_prot(pmd, PAGE_KERNEL_RO);
}

#ifdef CONFIG_X86_64
static void convert_pfn_mfn(void *v)
{
	pte_t *pte = v;
	int i;

	/* All levels are converted the same way, so just treat them
	   as ptes. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte[i] = xen_make_pte(pte[i].pte);
}

/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen-provided pagetable into
 * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
 * means that only the kernel has a physical mapping to start with -
 * but that's enough to get __va working.  We need to fill in the rest
 * of the physical mapping once some sort of allocator has been set
 * up.
 */
__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
					 unsigned long max_pfn)
{
	pud_t *l3;
	pmd_t *l2;

	/* Zap identity mapping */
	init_level4_pgt[0] = __pgd(0);

	/* Pre-constructed entries are in pfn, so convert to mfn */
	convert_pfn_mfn(init_level4_pgt);
	convert_pfn_mfn(level3_ident_pgt);
	convert_pfn_mfn(level3_kernel_pgt);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);

	memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
	memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
	memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	/* Set up identity map */
	xen_map_identity_early(level2_ident_pgt, max_pfn);

	/* Make pagetable pieces RO */
	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);

	/* Pin down new L4 */
	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
			  PFN_DOWN(__pa_symbol(init_level4_pgt)));

	/* Unpin Xen-provided one */
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	/* Switch over */
	pgd = init_level4_pgt;

	/*
	 * At this stage there can be no user pgd, and no page
	 * structure to attach it to, so make sure we just set kernel
	 * pgd.
	 */
	xen_mc_batch();
	__xen_write_cr3(true, __pa(pgd));
	xen_mc_issue(PARAVIRT_LAZY_CPU);

	reserve_early(__pa(xen_start_info->pt_base),
		      __pa(xen_start_info->pt_base +
			   xen_start_info->nr_pt_frames * PAGE_SIZE),
		      "XEN PAGETABLES");

	return pgd;
}
#else	/* !CONFIG_X86_64 */
static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;

__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
					 unsigned long max_pfn)
{
	pmd_t *kernel_pmd;

	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
				  xen_start_info->nr_pt_frames * PAGE_SIZE +
				  512*1024);

	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);

	xen_map_identity_early(level2_kernel_pgt, max_pfn);

	memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
	set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
		__pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));

	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	xen_write_cr3(__pa(swapper_pg_dir));

	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));

	reserve_early(__pa(xen_start_info->pt_base),
		      __pa(xen_start_info->pt_base +
			   xen_start_info->nr_pt_frames * PAGE_SIZE),
		      "XEN PAGETABLES");

	return swapper_pg_dir;
}
#endif	/* CONFIG_X86_64 */

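/*
 * Fixmap slots fall into two classes: ordinary RAM pages, which take
 * a pfn-based pte, and hypervisor- or hardware-provided frames, which
 * must be entered as mfns (with _PAGE_IOMAP for real device mappings).
 */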
static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
	pte_t pte;

	phys >>= PAGE_SHIFT;

	switch (idx) {
	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_F00F_BUG
	case FIX_F00F_IDT:
#endif
#ifdef CONFIG_X86_32
	case FIX_WP_TEST:
	case FIX_VDSO:
# ifdef CONFIG_HIGHMEM
	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
# endif
#else
	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	case FIX_APIC_BASE:	/* maps dummy local APIC */
#endif
	case FIX_TEXT_POKE0:
	case FIX_TEXT_POKE1:
		/* All local page mappings */
		pte = pfn_pte(phys, prot);
		break;

	case FIX_PARAVIRT_BOOTMAP:
		/* This is an MFN, but it isn't an IO mapping from the
		   IO domain */
		pte = mfn_pte(phys, prot);
		break;

	default:
		/* By default, set_fixmap is used for hardware mappings */
		pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
		break;
	}

	__native_set_fixmap(idx, pte);

#ifdef CONFIG_X86_64
	/* Replicate changes to map the vsyscall page into the user
	   pagetable vsyscall mapping. */
	if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
		unsigned long vaddr = __fix_to_virt(idx);
		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
	}
#endif
}

static __init void xen_post_allocator_init(void)
{
	pv_mmu_ops.set_pte = xen_set_pte;
	pv_mmu_ops.set_pmd = xen_set_pmd;
	pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.set_pgd = xen_set_pgd;
#endif

	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	pv_mmu_ops.alloc_pte = xen_alloc_pte;
	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
	pv_mmu_ops.release_pte = xen_release_pte;
	pv_mmu_ops.release_pmd = xen_release_pmd;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.alloc_pud = xen_alloc_pud;
	pv_mmu_ops.release_pud = xen_release_pud;
#endif

#ifdef CONFIG_X86_64
	SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
	xen_mark_init_mm_pinned();
}

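/*
 * Leaving lazy MMU mode must flush any multicalls still batched;
 * preemption is disabled so the flush and the mode change happen on
 * the same CPU.
 */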
static void xen_leave_lazy_mmu(void)
{
	preempt_disable();
	xen_mc_flush();
	paravirt_leave_lazy_mmu();
	preempt_enable();
}

static const struct pv_mmu_ops xen_mmu_ops __initdata = {
	.read_cr2 = xen_read_cr2,
	.write_cr2 = xen_write_cr2,

	.read_cr3 = xen_read_cr3,
	.write_cr3 = xen_write_cr3,

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,
	.flush_tlb_others = xen_flush_tlb_others,

	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,

	.pgd_alloc = xen_pgd_alloc,
	.pgd_free = xen_pgd_free,

	.alloc_pte = xen_alloc_pte_init,
	.release_pte = xen_release_pte_init,
	.alloc_pmd = xen_alloc_pmd_init,
	.alloc_pmd_clone = paravirt_nop,
	.release_pmd = xen_release_pmd_init,

#ifdef CONFIG_X86_64
	.set_pte = xen_set_pte,
#else
	.set_pte = xen_set_pte_init,
#endif
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd_hyper,

	.ptep_modify_prot_start = __ptep_modify_prot_start,
	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,
#endif	/* CONFIG_X86_PAE */
	.set_pud = xen_set_pud_hyper,

	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

#if PAGETABLE_LEVELS == 4
	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
	.set_pgd = xen_set_pgd_hyper,

	.alloc_pud = xen_alloc_pmd_init,
	.release_pud = xen_release_pmd_init,
#endif	/* PAGETABLE_LEVELS == 4 */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_mmu,
		.leave = xen_leave_lazy_mmu,
	},

	.set_fixmap = xen_set_fixmap,
};

void __init xen_init_mmu_ops(void)
{
	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
	pv_mmu_ops = xen_mmu_ops;
}

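/*
 * Expose the mmu_stats counters through debugfs, under the "xen/mmu"
 * directory, so the effectiveness of update batching can be inspected
 * at runtime.
 */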
#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_mmu_debug;

static int __init xen_mmu_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);

	debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
	debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_pinned);
	debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_batched);

	debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
	debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_pinned);
	debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_batched);

	debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
	debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_pinned);
	debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_batched);

	debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
//	debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
//			   &mmu_stats.pte_update_pinned);
	debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pte_update_batched);

	debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
	debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
			   &mmu_stats.mmu_update_extended);
	xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
				     mmu_stats.mmu_update_histo, 20);

	debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
	debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_batched);
	debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_current);
	debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_kernel);

	debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
	debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
			   &mmu_stats.prot_commit_batched);

	return 0;
}
fs_initcall(xen_mmu_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */