/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>
#include <asm/linkage.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

#define MMU_UPDATE_HISTO	30

#ifdef CONFIG_XEN_DEBUG_FS

static struct {
	u32 pgd_update;
	u32 pgd_update_pinned;
	u32 pgd_update_batched;

	u32 pud_update;
	u32 pud_update_pinned;
	u32 pud_update_batched;

	u32 pmd_update;
	u32 pmd_update_pinned;
	u32 pmd_update_batched;

	u32 pte_update;
	u32 pte_update_pinned;
	u32 pte_update_batched;

	u32 mmu_update;
	u32 mmu_update_extended;
	u32 mmu_update_histo[MMU_UPDATE_HISTO];

	u32 prot_commit;
	u32 prot_commit_batched;

	u32 set_pte_at;
	u32 set_pte_at_batched;
	u32 set_pte_at_pinned;
	u32 set_pte_at_current;
	u32 set_pte_at_kernel;
} mmu_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mmu_stats, 0, sizeof(mmu_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); mmu_stats.elem += (val); } while(0)

#else  /* !CONFIG_XEN_DEBUG_FS */

#define ADD_STATS(elem, val)	do { (void)(val); } while(0)

#endif /* CONFIG_XEN_DEBUG_FS */

/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)


#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
	{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
	{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;

static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
	__page_aligned_bss;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}
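
/*
 * Editor's illustration (not part of the original source): the p2m
 * table is a simple two-level radix tree.  With 4K pages and 8-byte
 * entries (x86-64), P2M_ENTRIES_PER_PAGE is 512, so a lookup for, say,
 * pfn 0x12345 splits as:
 *
 *	topidx = 0x12345 / 512 = 0x91;	// which leaf page
 *	idx    = 0x12345 % 512 = 0x145;	// slot within that page
 *	mfn    = p2m_top[0x91][0x145];
 *
 * On 32-bit, entries are 4 bytes, so the split is per 1024 instead.
 */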

/* Build the parallel p2m_top_mfn structures */
void xen_setup_mfn_list_list(void)
{
	unsigned pfn, idx;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);

static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
{
	unsigned long *p;
	unsigned i;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
		free_page((unsigned long)p);
	else
		*mfnp = virt_to_mfn(p);
}
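
/*
 * Editor's note on the cmpxchg above (a sketch of the race it closes,
 * not from the original source): two CPUs may call alloc_p2m() for the
 * same top-level slot concurrently.  Both allocate a page, but only the
 * first cmpxchg(pp, p2m_missing, p) swings the pointer; the loser sees
 * a non-p2m_missing value, frees its own page and leaves *mfnp alone,
 * so the slot is populated exactly once without needing a lock.
 */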

void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		/* no need to allocate a page to store an invalid entry */
		if (mfn == INVALID_P2M_ENTRY)
			return;
		alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]);
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * If the address is in the linear-mapped vaddr range, we can just
	 * use the (quick) virt_to_machine() p2m lookup.
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
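
/*
 * Usage sketch (editor's illustration, not original source): the slow
 * path matters for mappings outside the direct map, e.g. an ioremapped
 * page used during 64-bit pagetable setup:
 *
 *	void *p = early_ioremap(phys, PAGE_SIZE);	// hypothetical caller
 *	xmaddr_t m = arbitrary_virt_to_machine(p);	// walks the pagetable
 *
 * whereas for a lowmem address virt_addr_valid() is true and the result
 * comes straight from the p2m via virt_to_machine().
 */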

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		ADD_STATS(mmu_update_extended, 1);
		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);

		mcs.mc->args[1]++;

		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
		else
			ADD_STATS(mmu_update_histo[0], 1);
	} else {
		ADD_STATS(mmu_update, 1);
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
		ADD_STATS(mmu_update_histo[1], 1);
	}

	u = mcs.args;
	*u = *update;
}
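
/*
 * Batching sketch (editor's illustration, not original source): under
 * PARAVIRT_LAZY_MMU a run of updates coalesces into one hypercall.  If
 * the pending multicall entry is already an mmu_update, we just append
 * another struct mmu_update to its argument buffer and bump its count
 * (args[1]); otherwise a fresh single-op entry is started.  So, for a
 * pinned pagetable, conceptually:
 *
 *	arch_enter_lazy_mmu_mode();
 *	set_pmd(a, v1);		// starts an mmu_update multicall, count = 1
 *	set_pmd(b, v2);		// appended to the same entry, count = 2
 *	arch_leave_lazy_mmu_mode();	// one trap into Xen flushes both
 */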

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	ADD_STATS(pmd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pmd_update_pinned, 1);

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	ADD_STATS(set_pte_at, 1);
//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
	ADD_STATS(set_pte_at_current, mm == current->mm);
	ADD_STATS(set_pte_at_kernel, mm == &init_mm);

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			ADD_STATS(set_pte_at_batched, 1);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}
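
/*
 * Editor's note (not original source): xen_set_pte_at() tries the
 * cheapest viable strategy first:
 *
 *  1. for the current or kernel mm while in lazy MMU mode, queue an
 *     update_va_mapping multicall and let the batch carry it;
 *  2. otherwise, for those mms, try a direct
 *     HYPERVISOR_update_va_mapping hypercall (returns 0 on success);
 *  3. failing that, fall back to xen_set_pte(), a plain store which
 *     Xen traps and emulates if the page is a pinned pagetable.
 */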

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	ADD_STATS(prot_commit, 1);
	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
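
/*
 * Editor's note (not original source): generic mm code uses this pair
 * as a read-modify-write transaction on a pte, roughly:
 *
 *	pte_t old = ptep_modify_prot_start(mm, addr, ptep);
 *	old = pte_wrprotect(old);			// some prot change
 *	ptep_modify_prot_commit(mm, addr, ptep, old);
 *
 * The MMU_PT_UPDATE_PRESERVE_AD flag asks Xen to merge back any
 * Accessed/Dirty bits the hardware set between start and commit, so
 * they aren't lost by the overwrite.
 */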

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}
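
/*
 * Worked example (editor's illustration, not original source): suppose
 * pfn 0x1000 maps to mfn 0x5432 (an assumed p2m mapping) and the pte
 * value is 0x1000063 (pfn 0x1000 | PRESENT|RW|ACCESSED|DIRTY).  Then
 * pte_pfn_to_mfn() keeps the low flag bits and swaps the frame number:
 *
 *	flags = 0x1000063 & PTE_FLAGS_MASK     = 0x063;
 *	mfn   = pfn_to_mfn(0x1000)             = 0x5432;
 *	val   = (0x5432 << PAGE_SHIFT) | 0x063 = 0x5432063;
 *
 * pte_mfn_to_pfn() is the exact inverse.  Non-present values pass
 * through untouched, since their frame field has no meaning.
 */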

pteval_t xen_pte_val(pte_t pte)
{
	return pte_mfn_to_pfn(pte.pte);
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}

pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	ADD_STATS(pud_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pud_update_pinned, 1);

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	ADD_STATS(pte_update, 1);
//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}
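
/*
 * Editor's note on the PAE ordering above (not original source): a PAE
 * pte is two 32-bit words, and only pte_low carries _PAGE_PRESENT.
 * Writing pte_high first, behind the barrier, means any CPU that sees
 * the entry as present also sees the complete high word, so no torn
 * 64-bit pte is ever visible.  Clearing (xen_pte_clear below) does the
 * mirror image: low word first, then high.
 */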

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}

pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}
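
/*
 * Editor's note (not original source): 64-bit Xen PV guests keep two
 * pgds per process - a kernel one and a user one, since Xen runs user
 * and kernel mode on separate pagetables - with the user pgd's address
 * stashed in page->private of the kernel pgd's page.  Given a pointer
 * to a kernel pgd entry in the user range, this returns the matching
 * slot in the shadow user pgd, e.g.:
 *
 *	pgd_t *kpgd = pgd_offset(mm, addr);	// addr below USER_LIMIT
 *	pgd_t *upgd = xen_get_user_pgd(kpgd);	// same slot, user pgd
 *
 * so both copies can be updated together in xen_set_pgd().
 */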

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use early in boot, before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	ADD_STATS(pgd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	ADD_STATS(pgd_update_pinned, 1);
	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	pgd_t *pgd = mm->pgd;
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole and so is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}
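
/*
 * Editor's note on the callback contract (not original source): func is
 * invoked once per pagetable page with the level it sits at (PT_PUD,
 * PT_PMD, PT_PTE, and finally PT_PGD for the root), and returns nonzero
 * if its work requires a TLB flush; the walker ORs those together for
 * the caller.  A minimal callback therefore looks like:
 *
 *	static int count_page(struct mm_struct *mm, struct page *page,
 *			      enum pt_level level)
 *	{
 *		return 0;	// no flush needed
 *	}
 *
 * xen_pin_page(), xen_unpin_page() and xen_mark_pinned() below all
 * follow this shape.
 */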

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}
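
/*
 * Editor's note (not original source): "level" here is really an
 * mmuext_op command, e.g.:
 *
 *	xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);	// pin a pte page
 *	xen_do_pin(MMUEXT_PIN_L4_TABLE, pfn);	// pin a 64-bit pgd root
 *	xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);	// drop a pin again
 *
 * Each call only queues the op on the current multicall batch; nothing
 * reaches Xen until xen_mc_issue().
 */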

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		vm_unmap_aliases();
		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
		       PT_PMD);
#endif

	xen_pgd_walk(mm, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}
1043
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001044
1045#ifdef CONFIG_SMP
1046/* Another cpu may still have their %cr3 pointing at the pagetable, so
1047 we need to repoint it somewhere else before we can unpin it. */
1048static void drop_other_mm_ref(void *info)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001049{
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001050 struct mm_struct *mm = info;
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001051 struct mm_struct *active_mm;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001052
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001053#ifdef CONFIG_X86_64
1054 active_mm = read_pda(active_mm);
1055#else
1056 active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
1057#endif
1058
1059 if (active_mm == mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001060 leave_mm(smp_processor_id());
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001061
1062 /* If this cpu still has a stale cr3 reference, then make sure
1063 it has been flushed. */
1064 if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
1065 load_cr3(swapper_pg_dir);
1066 arch_flush_lazy_cpu_mode();
1067 }
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001068}
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001069
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001070static void xen_drop_mm_ref(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001071{
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001072 cpumask_t mask;
1073 unsigned cpu;
1074
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001075 if (current->active_mm == mm) {
1076 if (current->mm == mm)
1077 load_cr3(swapper_pg_dir);
1078 else
1079 leave_mm(smp_processor_id());
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001080 arch_flush_lazy_cpu_mode();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001081 }
1082
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001083 /* Get the "official" set of cpus referring to our pagetable. */
1084 mask = mm->cpu_vm_mask;
1085
1086 /* It's possible that a vcpu may have a stale reference to our
1087 cr3, because its in lazy mode, and it hasn't yet flushed
1088 its set of pending hypercalls yet. In this case, we can
1089 look at its actual current cr3 value, and force it to flush
1090 if needed. */
1091 for_each_online_cpu(cpu) {
1092 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
1093 cpu_set(cpu, mask);
1094 }
1095
1096 if (!cpus_empty(mask))
Jens Axboe3b16cf82008-06-26 11:21:54 +02001097 smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001098}
1099#else
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001100static void xen_drop_mm_ref(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001101{
1102 if (current->active_mm == mm)
1103 load_cr3(swapper_pg_dir);
1104}
1105#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_mmu_debug;

static int __init xen_mmu_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);

	debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
	debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_pinned);
	debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_batched);

	debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
	debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_pinned);
	debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_batched);

	debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
	debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_pinned);
	debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_batched);

	debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
//	debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
//			   &mmu_stats.pte_update_pinned);
	debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pte_update_batched);

	debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
	debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
			   &mmu_stats.mmu_update_extended);
	xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
				     mmu_stats.mmu_update_histo,
				     MMU_UPDATE_HISTO);

	debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
	debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_batched);
	debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_current);
	debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_kernel);

	debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
	debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
			   &mmu_stats.prot_commit_batched);

	return 0;
}
fs_initcall(xen_mmu_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */