/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion. In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable. When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest. This prevents uncontrolled
 * guest updates to the pagetable. Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow. The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use. This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>
#include <asm/linkage.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

#define MMU_UPDATE_HISTO	30

#ifdef CONFIG_XEN_DEBUG_FS

static struct {
	u32 pgd_update;
	u32 pgd_update_pinned;
	u32 pgd_update_batched;

	u32 pud_update;
	u32 pud_update_pinned;
	u32 pud_update_batched;

	u32 pmd_update;
	u32 pmd_update_pinned;
	u32 pmd_update_batched;

	u32 pte_update;
	u32 pte_update_pinned;
	u32 pte_update_batched;

	u32 mmu_update;
	u32 mmu_update_extended;
	u32 mmu_update_histo[MMU_UPDATE_HISTO];

	u32 prot_commit;
	u32 prot_commit_batched;

	u32 set_pte_at;
	u32 set_pte_at_batched;
	u32 set_pte_at_pinned;
	u32 set_pte_at_current;
	u32 set_pte_at_kernel;
} mmu_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mmu_stats, 0, sizeof(mmu_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); mmu_stats.elem += (val); } while(0)

#else /* !CONFIG_XEN_DEBUG_FS */

#define ADD_STATS(elem, val) do { (void)(val); } while(0)

#endif /* CONFIG_XEN_DEBUG_FS */

/*
 * Just beyond the highest usermode address. STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)


#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
	{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
	{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;

static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
	__page_aligned_bss;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}
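
/*
 * Worked example (illustrative): with 4K pages and 8-byte entries,
 * P2M_ENTRIES_PER_PAGE is 512, so pfn 0x1234 resolves to
 * p2m_top[p2m_top_index(0x1234)][p2m_index(0x1234)], i.e.
 * p2m_top[9][0x34], since 0x1234 / 512 == 9 and 0x1234 % 512 == 0x34.
 * On 32-bit, entries are 4 bytes and the divisor is 1024 instead.
 */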

/* Build the parallel p2m_top_mfn structures */
void xen_setup_mfn_list_list(void)
{
	unsigned pfn, idx;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
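
/*
 * Usage sketch (illustrative, not from the original file): callers
 * must be prepared for INVALID_P2M_ENTRY, which marks holes in the
 * p2m map.
 *
 *	unsigned long mfn = get_phys_to_machine(pfn);
 *	if (mfn == INVALID_P2M_ENTRY)
 *		return -ENOENT;		(no machine frame backs this pfn)
 */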

static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
{
	unsigned long *p;
	unsigned i;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
		free_page((unsigned long)p);
	else
		*mfnp = virt_to_mfn(p);
}

void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		/* no need to allocate a page to store an invalid entry */
		if (mfn == INVALID_P2M_ENTRY)
			return;
		alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]);
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;
}
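
/*
 * Illustrative sketch: storing a real mfn for a pfn whose p2m page is
 * still the shared p2m_missing placeholder first makes alloc_p2m()
 * above swap in a freshly allocated page, whereas storing
 * INVALID_P2M_ENTRY into a missing page is a no-op.
 *
 *	set_phys_to_machine(pfn, INVALID_P2M_ENTRY);	(stays "missing")
 *	set_phys_to_machine(pfn, mfn);		(allocates, then stores)
 */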

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the vaddr is in the linear-mapped range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
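
/*
 * Example (illustrative): a lowmem address passes virt_addr_valid()
 * and takes the quick virt_to_machine() path; a vmalloc or ioremap
 * address goes through the full lookup_address() walk instead.
 * (vmalloc_buf below is a hypothetical vmalloc()ed buffer.)
 *
 *	xmaddr_t a = arbitrary_virt_to_machine(p2m_missing);	(linear map)
 *	xmaddr_t b = arbitrary_virt_to_machine(vmalloc_buf);	(pt walk)
 */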

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		ADD_STATS(mmu_update_extended, 1);
		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);

		mcs.mc->args[1]++;

		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
		else
			ADD_STATS(mmu_update_histo[0], 1);
	} else {
		ADD_STATS(mmu_update, 1);
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
		ADD_STATS(mmu_update_histo[1], 1);
	}

	u = mcs.args;
	*u = *update;
}
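
/*
 * Batching sketch (illustrative): consecutive updates issued inside
 * one xen_mc_batch()/xen_mc_issue() pair are coalesced, either by
 * growing the argument list of the previous MULTI_mmu_update call
 * (the mcs.mc != NULL case above) or by starting a new multicall
 * entry.
 *
 *	xen_mc_batch();
 *	xen_extend_mmu_update(&u1);	(new mmu_update multicall, 1 op)
 *	xen_extend_mmu_update(&u2);	(extends it in place, now 2 ops)
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 */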

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	ADD_STATS(pmd_update, 1);

	/* If the page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pmd_update_pinned, 1);

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	ADD_STATS(set_pte_at, 1);
//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
	ADD_STATS(set_pte_at_current, mm == current->mm);
	ADD_STATS(set_pte_at_kernel, mm == &init_mm);

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			ADD_STATS(set_pte_at_batched, 1);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}
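
/*
 * Caller-side sketch (illustrative): wrapping a run of pte updates in
 * lazy MMU mode lets the multicall path above batch them instead of
 * issuing one hypercall per pte.
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (i = 0; i < n; i++)
 *		set_pte_at(mm, addr + i * PAGE_SIZE, ptep + i, ptes[i]);
 *	arch_leave_lazy_mmu_mode();
 */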

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is. We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	ADD_STATS(prot_commit, 1);
	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}
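
/*
 * Worked example (illustrative): PTE_PFN_MASK selects the frame
 * number bits and PTE_FLAGS_MASK everything else, so for a present
 * pte value such as 0x1234063 (frame 0x1234, flags 0x063) only the
 * frame bits are rewritten:
 *
 *	mfn   = (0x1234063 & PTE_PFN_MASK) >> PAGE_SHIFT;	(0x1234)
 *	flags =  0x1234063 & PTE_FLAGS_MASK;			(0x063)
 *	val   = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
 */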

pteval_t xen_pte_val(pte_t pte)
{
	return pte_mfn_to_pfn(pte.pte);
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}

pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	ADD_STATS(pud_update, 1);

	/* If the page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pud_update_pinned, 1);

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	ADD_STATS(pte_update, 1);
//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}

pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}
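
/*
 * Layout sketch (illustrative): on 64-bit each kernel pgd page may
 * have a companion user pgd page hung off page->private (set up at
 * pgd allocation, elsewhere). Given a pointer to a kernel pgd entry
 * below USER_LIMIT, xen_get_user_pgd() returns the entry at the same
 * offset in the user copy, or NULL if there is none:
 *
 *	pgd_t *user_pgd = xen_get_user_pgd(pgd + pgd_index(addr));
 *	if (user_pgd)
 *		... update the user alias as well ...
 */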

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure. This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	ADD_STATS(pgd_update, 1);

	/* If the page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	ADD_STATS(pgd_update_pinned, 1);
	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker. This one is intended for pinning a
 * pagetable. This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level. It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit. In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings. On 32-bit these
	 * will end up making a zero-sized hole, and so this is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it. Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}
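
/*
 * Usage sketch (illustrative): pinning a single pte page queues one
 * mmuext_op in the current batch, which a later xen_mc_issue()
 * flushes to the hypervisor.
 *
 *	xen_mc_batch();
 *	xen_do_pin(MMUEXT_PIN_L1_TABLE, page_to_pfn(page));
 *	xen_mc_issue(0);
 */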

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it. If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits). The solution is to mark RO and pin each PTE
		 * page while holding the lock. This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet. We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	vm_unmap_aliases();

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns. Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits. So do all
 * the book-keeping now.
 */
static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page. If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO, to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

#ifdef CONFIG_X86_64
	active_mm = read_pda(active_mm);
#else
	active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
#endif

	if (active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, &mm->cpu_vm_mask);

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls. In this case, we can look at its
	   actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it. This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing. This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_mmu_debug;

static int __init xen_mmu_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);

	debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
	debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_pinned);
	debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_batched);

	debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
	debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_pinned);
	debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_batched);

	debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
	debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_pinned);
	debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_batched);

	debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
//	debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
//			   &mmu_stats.pte_update_pinned);
	debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pte_update_batched);

	debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
	debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
			   &mmu_stats.mmu_update_extended);
	xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
				     mmu_stats.mmu_update_histo, 20);

	debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
	debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_batched);
	debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_current);
	debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_kernel);

	debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
	debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
			   &mmu_stats.prot_commit_batched);

	return 0;
}
fs_initcall(xen_mmu_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */