/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion. In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable. When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest. This prevents uncontrolled
 * guest updates to the pagetable. Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow. The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use. This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
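
/*
 * A worked example with made-up numbers: if the domain's pfn 0x100 is
 * backed by machine frame 0x5a2f, then the pte mapping that page holds
 * (0x5a2f << PAGE_SHIFT) | flags, while reading it back through
 * __pte_val yields (0x100 << PAGE_SHIFT) | flags.
 */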
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>
#include <asm/linkage.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"

/*
 * Just beyond the highest usermode address. STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)


#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
	{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
	{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;

static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
	__page_aligned_bss;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}
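
/*
 * Illustrative sketch (hypothetical helper, not used elsewhere): the
 * two index helpers above decompose a pfn for the two-level table.
 * With 4K pages and 8-byte entries (64-bit), P2M_ENTRIES_PER_PAGE is
 * 512, so pfn 0x12345 resolves to p2m_top[0x91][0x145].
 * get_phys_to_machine() below is this lookup plus range checking.
 */
static unsigned long __maybe_unused p2m_lookup_sketch(unsigned long pfn)
{
	return p2m_top[p2m_top_index(pfn)][p2m_index(pfn)];
}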

/* Build the parallel p2m_top_mfn structures */
void xen_setup_mfn_list_list(void)
{
	unsigned pfn, idx;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);

static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
{
	unsigned long *p;
	unsigned i;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
		free_page((unsigned long)p);
	else
		*mfnp = virt_to_mfn(p);
}

void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		/* no need to allocate a page to store an invalid entry */
		if (mfn == INVALID_P2M_ENTRY)
			return;
		alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]);
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;
}
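
/*
 * Usage sketch (hypothetical helper and values): update a p2m entry
 * and read it straight back.  A pfn below MAX_DOMAIN_PAGES and a
 * non-autotranslated domain are assumed.
 */
static void __maybe_unused p2m_update_sketch(void)
{
	unsigned long pfn = 0x1000;	/* hypothetical guest pfn */
	unsigned long mfn = 0x5a2f;	/* hypothetical machine frame */

	set_phys_to_machine(pfn, mfn);
	BUG_ON(get_phys_to_machine(pfn) != mfn);
}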

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);
	unsigned offset = address & ~PAGE_MASK;

	BUG_ON(pte == NULL);

	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

static void extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL)
		mcs.mc->args[1]++;
	else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}
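
/*
 * Sketch (hypothetical helper) of what extend_mmu_update() buys us:
 * within one batch, consecutive updates are folded into a single
 * mmu_update hypercall by bumping its count argument rather than
 * queueing a new multicall entry.
 */
static void __maybe_unused mmu_update_batch_sketch(pmd_t *a, pmd_t va,
						   pmd_t *b, pmd_t vb)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(a).maddr;
	u.val = pmd_val_ma(va);
	extend_mmu_update(&u);		/* starts an mmu_update multicall */

	u.ptr = arbitrary_virt_to_machine(b).maddr;
	u.val = pmd_val_ma(vb);
	extend_mmu_update(&u);		/* folded into the same multicall */

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}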

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is. We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}
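
/*
 * Sketch (hypothetical helper): the two conversions above are inverses
 * whenever the p2m and m2p tables agree, so a present pte value should
 * survive a round trip through them unchanged.
 */
static void __maybe_unused pte_roundtrip_sketch(pteval_t val)
{
	pteval_t pfn_based = pte_mfn_to_pfn(val);	/* mfn form -> pfn form */

	BUG_ON(pte_pfn_to_mfn(pfn_based) != val);	/* and back again */
}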

pteval_t xen_pte_val(pte_t pte)
{
	return pte_mfn_to_pfn(pte.pte);
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}

pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}

pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure. This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker. This one is intended for pinning a
 * pagetable. This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level. It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit. In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int pgd_walk(pgd_t *pgd, int (*func)(struct page *, enum pt_level),
		    unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings. On 32-bit these
	 * end up making a zero-sized hole, so this is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(virt_to_page(pgd), PT_PGD);

	return flush;
}

static spinlock_t *lock_pte(struct page *page)
{
	spinlock_t *ptl = NULL;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	ptl = __pte_lockptr(page);
	spin_lock(ptl);
#endif

	return ptl;
}

static void do_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

static int pin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it. If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits). The solution is to mark RO and pin each PTE
		 * page while holding the lock. This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = lock_pte(page);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet. We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	xen_mc_batch();

	if (pgd_walk(pgd, pin_page, USER_LIMIT)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			pin_page(virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	pin_page(virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns. Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			xen_pgd_pin((pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits. So do all
 * the book-keeping now.
 */
static __init int mark_pinned(struct page *page, enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}

static int unpin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page. If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = lock_pte(page);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(user_pgd)));
			unpin_page(virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	unpin_page(virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), PT_PMD);
#endif

	pgd_walk(pgd, unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			xen_pgd_unpin((pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next->pgd);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
917static void drop_other_mm_ref(void *info)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700918{
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700919 struct mm_struct *mm = info;
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -0700920 struct mm_struct *active_mm;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700921
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -0700922#ifdef CONFIG_X86_64
923 active_mm = read_pda(active_mm);
924#else
925 active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
926#endif
927
928 if (active_mm == mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700929 leave_mm(smp_processor_id());
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -0700930
931 /* If this cpu still has a stale cr3 reference, then make sure
932 it has been flushed. */
933 if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
934 load_cr3(swapper_pg_dir);
935 arch_flush_lazy_cpu_mode();
936 }
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700937}
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700938
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700939static void drop_mm_ref(struct mm_struct *mm)
940{
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -0700941 cpumask_t mask;
942 unsigned cpu;
943
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700944 if (current->active_mm == mm) {
945 if (current->mm == mm)
946 load_cr3(swapper_pg_dir);
947 else
948 leave_mm(smp_processor_id());
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -0700949 arch_flush_lazy_cpu_mode();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700950 }
951
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -0700952 /* Get the "official" set of cpus referring to our pagetable. */
953 mask = mm->cpu_vm_mask;

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode, and it hasn't yet flushed
	   its set of pending hypercalls. In this case, we can look
	   at its actual current cr3 value, and force it to flush
	   if needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpu_set(cpu, mask);
	}

	if (!cpus_empty(mask))
		smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it. This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing. This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (page_pinned(mm->pgd))
		xen_pgd_unpin(mm->pgd);

	spin_unlock(&mm->page_table_lock);
}