/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
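/*
 * Illustrative sketch (not compiled here): the pfn<->mfn round-trip
 * described above, using the pfn_to_mfn()/mfn_to_pfn() helpers from
 * <xen/page.h>.  On an auto-translated-physmap guest both are the
 * identity, so the round-trip is trivially exact.
 *
 *	unsigned long mfn = pfn_to_mfn(pfn);	// guest-physical -> machine
 *	unsigned long back = mfn_to_pfn(mfn);	// machine -> guest-physical
 *	BUG_ON(back != pfn);
 */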
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"

xmaddr_t arbitrary_virt_to_machine(unsigned long address)
{
	pte_t *pte = lookup_address(address);
	unsigned offset = address & ~PAGE_MASK;	/* offset within the page */

	BUG_ON(pte == NULL);

	return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;

	pte = lookup_address(address);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;

	pte = lookup_address(address);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}
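/*
 * Minimal usage sketch (hypothetical, not part of this file): a lowmem
 * page destined to become a pagetable page must be made RO before Xen
 * will accept it, and can be returned to RW once it is retired:
 *
 *	void *pt = (void *)get_zeroed_page(GFP_KERNEL);
 *	make_lowmem_page_readonly(pt);		// before use as a pagetable
 *	...
 *	make_lowmem_page_readwrite(pt);		// once no longer a pagetable
 *	free_page((unsigned long)pt);
 */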


void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pmd_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <mfn,flags> stored as-is, to permit clearing entries */
	xen_set_pte(pte, mfn_pte(mfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
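/*
 * Example (hypothetical values): map a machine frame the hypervisor
 * handed us at a fixed kernel virtual address, then clear the entry
 * again - possible because <mfn,flags> are stored as-is:
 *
 *	set_pte_mfn(vaddr, mfn, PAGE_KERNEL);	// map mfn at vaddr, RW
 *	set_pte_mfn(vaddr, 0, __pgprot(0));	// clear the entry
 */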

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			return;
		} else if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
			return;
	}
	xen_set_pte(ptep, pteval);
}

#ifdef CONFIG_X86_PAE
void xen_set_pud(pud_t *ptr, pud_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pud_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, pte_val_ma(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	xen_set_pmd(pmdp, __pmd(0));
}

unsigned long long xen_pte_val(pte_t pte)
{
	unsigned long long ret = 0;

	if (pte.pte_low) {
		ret = ((unsigned long long)pte.pte_high << 32) | pte.pte_low;
		ret = machine_to_phys(XMADDR(ret)).paddr | 1;
	}

	return ret;
}

unsigned long long xen_pmd_val(pmd_t pmd)
{
	unsigned long long ret = pmd.pmd;
	if (ret)
		ret = machine_to_phys(XMADDR(ret)).paddr | 1;
	return ret;
}

unsigned long long xen_pgd_val(pgd_t pgd)
{
	unsigned long long ret = pgd.pgd;
	if (ret)
		ret = machine_to_phys(XMADDR(ret)).paddr | 1;
	return ret;
}

pte_t xen_make_pte(unsigned long long pte)
{
	if (pte & 1)
		pte = phys_to_machine(XPADDR(pte)).maddr;

	pte &= ~_PAGE_PCD;

	return (pte_t){ pte, pte >> 32 };
}

pmd_t xen_make_pmd(unsigned long long pmd)
{
	if (pmd & 1)
		pmd = phys_to_machine(XPADDR(pmd)).maddr;

	return (pmd_t){ pmd };
}

pgd_t xen_make_pgd(unsigned long long pgd)
{
	if (pgd & _PAGE_PRESENT)
		pgd = phys_to_machine(XPADDR(pgd)).maddr;

	return (pgd_t){ pgd };
}
#else  /* !PAE */
void xen_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

unsigned long xen_pte_val(pte_t pte)
{
	unsigned long ret = pte.pte_low;

	if (ret & _PAGE_PRESENT)
		ret = machine_to_phys(XMADDR(ret)).paddr;

	return ret;
}

unsigned long xen_pgd_val(pgd_t pgd)
{
	unsigned long ret = pgd.pgd;
	if (ret)
		ret = machine_to_phys(XMADDR(ret)).paddr | 1;
	return ret;
}

pte_t xen_make_pte(unsigned long pte)
{
	if (pte & _PAGE_PRESENT)
		pte = phys_to_machine(XPADDR(pte)).maddr;

	pte &= ~_PAGE_PCD;

	return (pte_t){ pte };
}

pgd_t xen_make_pgd(unsigned long pgd)
{
	if (pgd & _PAGE_PRESENT)
		pgd = phys_to_machine(XPADDR(pgd)).maddr;

	return (pgd_t){ pgd };
}
#endif	/* CONFIG_X86_PAE */

enum pt_level {
	PT_PGD,
	PT_PUD,
	PT_PMD,
	PT_PTE
};

/*
  (Yet another) pagetable walker.  This one is intended for pinning a
  pagetable.  This means that it walks a pagetable and calls the
  callback function on each page it finds making up the page table,
  at every level.  It walks the entire pagetable, but it only bothers
  pinning pte pages which are below limit.  In the normal case this
  will be TASK_SIZE, but at boot we need to pin up to FIXADDR_TOP.
  The important bit is that we don't pin beyond there, because then
  we start getting into Xen's ptes.
*/
static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, enum pt_level),
		    unsigned long limit)
{
	pgd_t *pgd = pgd_base;
	int flush = 0;
	unsigned long addr = 0;
	unsigned long pgd_next;

	BUG_ON(limit > FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
		pud_t *pud;
		unsigned long pud_limit, pud_next;

		pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);

		if (!pgd_val(*pgd))
			continue;

		pud = pud_offset(pgd, 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(virt_to_page(pud), PT_PUD);

		for (; addr != pud_limit; pud++, addr = pud_next) {
			pmd_t *pmd;
			unsigned long pmd_limit;

			pud_next = pud_addr_end(addr, pud_limit);

			if (pud_next < limit)
				pmd_limit = pud_next;
			else
				pmd_limit = limit;

			if (pud_none(*pud))
				continue;

			pmd = pmd_offset(pud, 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(virt_to_page(pmd), PT_PMD);

			for (; addr != pmd_limit; pmd++) {
				addr += (PAGE_SIZE * PTRS_PER_PTE);
				if ((pmd_limit-1) < (addr-1)) {
					/* overshot pmd_limit; the -1 bias
					   copes with wraparound at the top
					   of the address space */
					addr = pmd_limit;
					break;
				}

				if (pmd_none(*pmd))
					continue;

				flush |= (*func)(pmd_page(*pmd), PT_PTE);
			}
		}
	}

	flush |= (*func)(virt_to_page(pgd_base), PT_PGD);

	return flush;
}
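/*
 * A trivial (hypothetical) pgd_walk() callback, of the same shape as
 * pin_page()/unpin_page() below - return nonzero if the walk should
 * trigger a TLB flush:
 *
 *	static int inspect_page(struct page *page, enum pt_level level)
 *	{
 *		// examine `page` at `level` (PT_PGD .. PT_PTE)
 *		return 0;	// no flush needed
 *	}
 *
 *	pgd_walk(init_mm.pgd, inspect_page, FIXADDR_TOP);
 */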

static spinlock_t *lock_pte(struct page *page)
{
	spinlock_t *ptl = NULL;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	ptl = __pte_lockptr(page);
	spin_lock(ptl);
#endif

	return ptl;
}

static void do_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}
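/*
 * For reference, the unbatched equivalent of xen_do_pin() would be a
 * single direct mmuext hypercall (a sketch only; the batched multicall
 * form above is what this file actually uses):
 *
 *	struct mmuext_op op = {
 *		.cmd = level,			// e.g. MMUEXT_PIN_L1_TABLE
 *		.arg1.mfn = pfn_to_mfn(pfn),
 *	};
 *	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
 *		BUG();
 */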

static int pin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = test_and_set_bit(PG_pinned, &page->flags);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		ptl = NULL;
		if (level == PT_PTE)
			ptl = lock_pte(page);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (level == PT_PTE)
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

		if (ptl) {
			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	unsigned level;

	xen_mc_batch();

	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		xen_mc_batch();
	}

#ifdef CONFIG_X86_PAE
	level = MMUEXT_PIN_L3_TABLE;
#else
	level = MMUEXT_PIN_L2_TABLE;
#endif

	xen_do_pin(level, PFN_DOWN(__pa(pgd)));

	xen_mc_issue(0);
}

/* The init_mm pagetable is really pinned as soon as it's created, but
   that's before we have page structures to store the bits.  So do all
   the book-keeping now. */
static __init int mark_pinned(struct page *page, enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}

static int unpin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = test_and_clear_bit(PG_pinned, &page->flags);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		if (level == PT_PTE) {
			ptl = lock_pte(page);

			xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pgd_walk(pgd, unpin_page, TASK_SIZE);

	xen_mc_issue(0);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next->pgd);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;

	if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void drop_mm_ref(struct mm_struct *mm)
{
	cpumask_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	mask = mm->cpu_vm_mask;

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpu_set(cpu, mask);
	}

	if (!cpus_empty(mask))
		xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (PagePinned(virt_to_page(mm->pgd)))
		xen_pgd_unpin(mm->pgd);

	spin_unlock(&mm->page_table_lock);
}