/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
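
/*
 * Illustrative sketch (not part of the original file): how the
 * pfn<->mfn translation shows up when constructing a pte.  The pfn
 * the kernel uses is converted to the mfn the hardware needs before
 * it is written into the pagetable:
 *
 *	unsigned long pfn = page_to_pfn(page);
 *	pte_t pte = mfn_pte(pfn_to_mfn(pfn), PAGE_KERNEL);
 *
 * Reading the entry back via pte_val() applies the inverse mfn-to-pfn
 * translation, so generic mm code only ever sees pfns.
 */
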
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"

xmaddr_t arbitrary_virt_to_machine(unsigned long address)
{
	pte_t *pte = lookup_address(address);
	unsigned offset = address & ~PAGE_MASK;	/* offset within the page */

	BUG_ON(pte == NULL);

	return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;

	pte = lookup_address(address);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;

	pte = lookup_address(address);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pmd_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

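/*
 * A brief sketch of the batching pattern used by xen_set_pmd() above:
 * xen_mc_entry() reserves a slot in a per-cpu multicall buffer, and
 * xen_mc_issue() either flushes the batch with a single hypercall or,
 * inside a lazy-MMU section, leaves it queued so several updates share
 * one hypervisor entry:
 *
 *	arch_enter_lazy_mmu_mode();
 *	set_pmd(pmd0, val0);		// queued in the multicall buffer
 *	set_pmd(pmd1, val1);		// queued in the same batch
 *	arch_leave_lazy_mmu_mode();	// whole batch issued here
 */
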
/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <mfn,flags> stored as-is, to permit clearing entries */
	xen_set_pte(pte, mfn_pte(mfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

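/*
 * Hypothetical usage sketch: because the mfn is stored untranslated,
 * set_pte_mfn() can map machine frames that have no pfn in this
 * domain, e.g. a frame shared in from another domain:
 *
 *	set_pte_mfn(vaddr, shared_mfn, PAGE_KERNEL);	// map it
 *	set_pte_mfn(vaddr, 0, __pgprot(0));		// clear it again
 */
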
/*
 * Set a pte.  If the target mm is current (or init_mm), its mappings
 * are live in this cpu's pagetable, so Xen's update_va_mapping
 * interface can be used: batched via a multicall when in lazy MMU
 * mode, or as a direct hypercall otherwise.  For any other mm, fall
 * back to writing the pte entry directly.
 */
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			return;
		} else if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
			return;
	}
	xen_set_pte(ptep, pteval);
}

#ifdef CONFIG_X86_PAE
void xen_set_pud(pud_t *ptr, pud_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pud_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	/* Write the high word first: the present bit lives in the low
	   word, so the pte never appears present while half-updated. */
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, pte_val_ma(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	xen_set_pmd(pmdp, __pmd(0));
}

unsigned long long xen_pte_val(pte_t pte)
{
	unsigned long long ret = 0;

	if (pte.pte_low) {
		ret = ((unsigned long long)pte.pte_high << 32) | pte.pte_low;
		ret = machine_to_phys(XMADDR(ret)).paddr | 1;
	}

	return ret;
}

unsigned long long xen_pmd_val(pmd_t pmd)
{
	unsigned long long ret = pmd.pmd;
	if (ret)
		ret = machine_to_phys(XMADDR(ret)).paddr | 1;
	return ret;
}

unsigned long long xen_pgd_val(pgd_t pgd)
{
	unsigned long long ret = pgd.pgd;
	if (ret)
		ret = machine_to_phys(XMADDR(ret)).paddr | 1;
	return ret;
}

pte_t xen_make_pte(unsigned long long pte)
{
	if (pte & _PAGE_PRESENT)
		pte = phys_to_machine(XPADDR(pte)).maddr;

	return (pte_t){ .pte = pte };
}

pmd_t xen_make_pmd(unsigned long long pmd)
{
	if (pmd & _PAGE_PRESENT)
		pmd = phys_to_machine(XPADDR(pmd)).maddr;

	return (pmd_t){ pmd };
}

pgd_t xen_make_pgd(unsigned long long pgd)
{
	if (pgd & _PAGE_PRESENT)
		pgd = phys_to_machine(XPADDR(pgd)).maddr;

	return (pgd_t){ pgd };
}
#else  /* !CONFIG_X86_PAE */
void xen_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

unsigned long xen_pte_val(pte_t pte)
{
	unsigned long ret = pte.pte_low;

	if (ret & _PAGE_PRESENT)
		ret = machine_to_phys(XMADDR(ret)).paddr;

	return ret;
}

unsigned long xen_pgd_val(pgd_t pgd)
{
	unsigned long ret = pgd.pgd;
	if (ret)
		ret = machine_to_phys(XMADDR(ret)).paddr | 1;
	return ret;
}

pte_t xen_make_pte(unsigned long pte)
{
	if (pte & _PAGE_PRESENT)
		pte = phys_to_machine(XPADDR(pte)).maddr;

	pte &= ~_PAGE_PCD;

	return (pte_t){ pte };
}

pgd_t xen_make_pgd(unsigned long pgd)
{
	if (pgd & _PAGE_PRESENT)
		pgd = phys_to_machine(XPADDR(pgd)).maddr;

	return (pgd_t){ pgd };
}
#endif	/* CONFIG_X86_PAE */

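/*
 * Worked round-trip sketch (hypothetical numbers): if pfn 0x100 maps
 * to mfn 0x5a0, then for a present pte the flag bits ride along in the
 * low bits of the address passed through phys_to_machine():
 *
 *	xen_make_pte((0x100 << PAGE_SHIFT) | flags)
 *		-> hardware pte holding (0x5a0 << PAGE_SHIFT) | flags
 *	xen_pte_val(that hardware pte)
 *		-> (0x100 << PAGE_SHIFT) | flags again
 *
 * so the p2m/m2p translation is invisible to generic mm code.
 */
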
enum pt_level {
	PT_PGD,
	PT_PUD,
	PT_PMD,
	PT_PTE
};

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be TASK_SIZE, but at boot we need to pin up to FIXADDR_TOP.
 * But the important bit is that we don't pin beyond there, because
 * then we start getting into Xen's ptes.
 */
static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, enum pt_level),
		    unsigned long limit)
{
	pgd_t *pgd = pgd_base;
	int flush = 0;
	unsigned long addr = 0;
	unsigned long pgd_next;

	BUG_ON(limit > FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
		pud_t *pud;
		unsigned long pud_limit, pud_next;

		pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);

		if (!pgd_val(*pgd))
			continue;

		pud = pud_offset(pgd, 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(virt_to_page(pud), PT_PUD);

		for (; addr != pud_limit; pud++, addr = pud_next) {
			pmd_t *pmd;
			unsigned long pmd_limit;

			pud_next = pud_addr_end(addr, pud_limit);

			if (pud_next < limit)
				pmd_limit = pud_next;
			else
				pmd_limit = limit;

			if (pud_none(*pud))
				continue;

			pmd = pmd_offset(pud, 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(virt_to_page(pmd), PT_PMD);

			for (; addr != pmd_limit; pmd++) {
				addr += (PAGE_SIZE * PTRS_PER_PTE);
				if ((pmd_limit-1) < (addr-1)) {
					addr = pmd_limit;
					break;
				}

				if (pmd_none(*pmd))
					continue;

				flush |= (*func)(pmd_page(*pmd), PT_PTE);
			}
		}
	}

	flush |= (*func)(virt_to_page(pgd_base), PT_PGD);

	return flush;
}

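/*
 * Sketch of how the walker is used, with a hypothetical callback
 * (modelled on mark_pinned() below): the callback runs once per
 * pagetable page, and its return value is OR-ed into the
 * "needs flush" result:
 *
 *	static int count_pt_page(struct page *page, enum pt_level level)
 *	{
 *		return 0;	// no flush needed for this page
 *	}
 *	...
 *	pgd_walk(mm->pgd, count_pt_page, TASK_SIZE);
 */
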
static spinlock_t *lock_pte(struct page *page)
{
	spinlock_t *ptl = NULL;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	ptl = __pte_lockptr(page);
	spin_lock(ptl);
#endif

	return ptl;
}

static void do_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

static int pin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = test_and_set_bit(PG_pinned, &page->flags);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		ptl = NULL;
		if (level == PT_PTE)
			ptl = lock_pte(page);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (level == PT_PTE)
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

		if (ptl) {
			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	unsigned level;

	xen_mc_batch();

	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		xen_mc_batch();
	}

#ifdef CONFIG_X86_PAE
	level = MMUEXT_PIN_L3_TABLE;
#else
	level = MMUEXT_PIN_L2_TABLE;
#endif

	xen_do_pin(level, PFN_DOWN(__pa(pgd)));

	xen_mc_issue(0);
}

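/*
 * For orientation, a sketch of where pinning happens in the mm
 * lifecycle (both callers appear later in this file):
 *
 *	fork():   dup_mmap()    -> xen_dup_mmap()    -> xen_pgd_pin()
 *	execve(): activate_mm() -> xen_activate_mm() -> xen_pgd_pin()
 */
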
/* The init_mm pagetable is really pinned as soon as it's created, but
   that's before we have page structures to store the bits.  So do all
   the book-keeping now. */
static __init int mark_pinned(struct page *page, enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}

static int unpin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = test_and_clear_bit(PG_pinned, &page->flags);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		if (level == PT_PTE) {
			ptl = lock_pte(page);

			xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pgd_walk(pgd, unpin_page, TASK_SIZE);

	xen_mc_issue(0);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next->pgd);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}

#ifdef CONFIG_SMP
/* Another cpu may still have their %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;

	if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void drop_mm_ref(struct mm_struct *mm)
{
	cpumask_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	mask = mm->cpu_vm_mask;

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpu_set(cpu, mask);
	}

	if (!cpus_empty(mask))
		xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

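/*
 * Note on the stale-cr3 check above: xen_current_cr3 is a percpu
 * shadow of the cr3 value actually installed on each vcpu, updated as
 * cr3 writes complete.  A vcpu in lazy mode can still have our pgd in
 * cr3 after clearing itself from mm->cpu_vm_mask, so comparing
 * xen_current_cr3 against __pa(mm->pgd) catches those stragglers:
 *
 *	if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
 *		cpu_set(cpu, mask);	// force this vcpu to flush too
 */
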
/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (PagePinned(virt_to_page(mm->pgd)))
		xen_pgd_unpin(mm->pgd);

	spin_unlock(&mm->page_table_lock);
}
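
/*
 * Teardown sequence sketch, summarizing the comment above (hook names
 * as wired up through paravirt_ops):
 *
 *	exit_mmap(mm)
 *	  -> arch_exit_mmap(mm) -> xen_exit_mmap(mm)
 *	       drop_mm_ref(mm);		// no vcpu still holds this cr3
 *	       xen_pgd_unpin(mm->pgd);	// pagetable is plain RW memory again
 *	  -> free_pgtables() etc. then proceed as ordinary memory accesses
 */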