/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"

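/*
 * Illustrative sketch only (not part of this file's interface): the
 * pfn->mfn substitution described in the header comment above, written
 * out as a helper.  mfn_pte() and pfn_to_mfn() come from <xen/page.h>;
 * the function name is hypothetical.
 */
static inline pte_t example_pte_for_cpu(unsigned long pfn, pgprot_t prot)
{
	/* The CPU's MMU sees machine frames, so substitute the mfn. */
	return mfn_pte(pfn_to_mfn(pfn), prot);
}
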
xmaddr_t arbitrary_virt_to_machine(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);
	unsigned offset = address & ~PAGE_MASK;

	BUG_ON(pte == NULL);

	return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
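
/*
 * Example use (sketch; "example_buf" is a hypothetical kernel-mapped
 * buffer): getting the machine address of a kernel virtual address
 * before handing it to the hypervisor.  The translation goes through
 * the live pagetable and BUGs if the address is unmapped.
 */
static inline xmaddr_t example_machine_addr_of(void *example_buf)
{
	return arbitrary_virt_to_machine((unsigned long)example_buf);
}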

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}
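
/*
 * Example lifecycle (sketch; "pt" is a hypothetical page-aligned
 * lowmem allocation): a page must be write-protected before Xen will
 * accept it as a pagetable page, and is made RW again once released.
 */
static inline void example_pt_page_lifecycle(void *pt)
{
	make_lowmem_page_readonly(pt);	/* before use as a pagetable */
	/* ... page is installed in a pagetable and later removed ... */
	make_lowmem_page_readwrite(pt);	/* after it is released */
}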


void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pmd_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}
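
/*
 * Example (sketch; identifiers hypothetical): because xen_set_pmd()
 * issues its multicall with PARAVIRT_LAZY_MMU, consecutive updates
 * made inside lazy MMU mode are queued and flushed together in a
 * single hypercall rather than one hypercall per update.
 */
static inline void example_batched_pmd_updates(pmd_t *a, pmd_t va,
					       pmd_t *b, pmd_t vb)
{
	arch_enter_lazy_mmu_mode();
	xen_set_pmd(a, va);		/* queued */
	xen_set_pmd(b, vb);		/* queued */
	arch_leave_lazy_mmu_mode();	/* both flushed here */
}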

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <mfn,flags> stored as-is, to permit clearing entries */
	xen_set_pte(pte, mfn_pte(mfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
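
/*
 * Example (sketch; "example_vaddr"/"example_mfn" are hypothetical):
 * installing a machine frame - e.g. one granted by another domain -
 * at a fixed kernel virtual address.
 */
static inline void example_map_mfn(unsigned long example_vaddr,
				   unsigned long example_mfn)
{
	set_pte_mfn(example_vaddr, example_mfn, PAGE_KERNEL);
}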

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = (mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = (pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}
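
/*
 * Illustrative property (sketch; not used below): for a present pte
 * value referring to one of this domain's own frames, the two
 * conversions above are inverses, so a pfn->mfn->pfn round trip must
 * return the original value.
 */
static inline void example_check_roundtrip(pteval_t val)
{
	if (val & _PAGE_PRESENT)
		BUG_ON(pte_mfn_to_pfn(pte_pfn_to_mfn(val)) != val);
}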

pteval_t xen_pte_val(pte_t pte)
{
	return pte_mfn_to_pfn(pte.pte);
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}

pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
#ifdef CONFIG_X86_PAE
void xen_set_pud(pud_t *ptr, pud_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pud_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();	/* write high first; the entry only becomes
			   valid once the low word (holding the
			   present bit) is written */
	ptep->pte_low = pte.pte_low;
}

void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, pte_val_ma(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	xen_set_pmd(pmdp, __pmd(0));
}

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
#else  /* !PAE */
void xen_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}
#endif	/* CONFIG_X86_PAE */

/*
  (Yet another) pagetable walker.  This one is intended for pinning a
  pagetable.  This means that it walks a pagetable and calls the
  callback function on each page it finds making up the page table,
  at every level.  It walks the entire pagetable, but it only bothers
  pinning pte pages which are below limit.  In the normal case this
  will be TASK_SIZE, but at boot we need to pin up to FIXADDR_TOP.
  But the important bit is that we don't pin beyond there, because
  then we start getting into Xen's ptes.
*/
static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, enum pt_level),
		    unsigned long limit)
{
	pgd_t *pgd = pgd_base;
	int flush = 0;
	unsigned long addr = 0;
	unsigned long pgd_next;

	BUG_ON(limit > FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
		pud_t *pud;
		unsigned long pud_limit, pud_next;

		pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);

		if (!pgd_val(*pgd))
			continue;

		pud = pud_offset(pgd, 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(virt_to_page(pud), PT_PUD);

		for (; addr != pud_limit; pud++, addr = pud_next) {
			pmd_t *pmd;
			unsigned long pmd_limit;

			pud_next = pud_addr_end(addr, pud_limit);

			if (pud_next < limit)
				pmd_limit = pud_next;
			else
				pmd_limit = limit;

			if (pud_none(*pud))
				continue;

			pmd = pmd_offset(pud, 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(virt_to_page(pmd), PT_PMD);

			for (; addr != pmd_limit; pmd++) {
				addr += (PAGE_SIZE * PTRS_PER_PTE);
				/* the -1s avoid a wrap to 0 at the top
				   of the address space */
				if ((pmd_limit-1) < (addr-1)) {
					addr = pmd_limit;
					break;
				}

				if (pmd_none(*pmd))
					continue;

				flush |= (*func)(pmd_page(*pmd), PT_PTE);
			}
		}
	}

	flush |= (*func)(virt_to_page(pgd_base), PT_PGD);

	return flush;
}
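
/*
 * Example callback (sketch; not used below): pgd_walk() hands every
 * constituent pagetable page to the callback with its level, and the
 * callback's nonzero return is OR-ed into pgd_walk()'s result to
 * request a flush.  This one merely counts pages.
 */
static int example_count_page(struct page *page, enum pt_level level)
{
	static unsigned long example_pt_pages;

	example_pt_pages++;
	return 0;		/* no flush required */
}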

static spinlock_t *lock_pte(struct page *page)
{
	spinlock_t *ptl = NULL;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	ptl = __pte_lockptr(page);
	spin_lock(ptl);
#endif

	return ptl;
}

static void do_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}
391
392static int pin_page(struct page *page, enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700393{
Christoph Lameterd60cd462008-04-28 02:12:51 -0700394 unsigned pgfl = TestSetPagePinned(page);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700395 int flush;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700396
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700397 if (pgfl)
398 flush = 0; /* already pinned */
399 else if (PageHighMem(page))
400 /* kmaps need flushing if we found an unpinned
401 highpage */
402 flush = 1;
403 else {
404 void *pt = lowmem_page_address(page);
405 unsigned long pfn = page_to_pfn(page);
406 struct multicall_space mcs = __xen_mc_entry(0);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700407 spinlock_t *ptl;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700408
409 flush = 0;
410
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700411 ptl = NULL;
412 if (level == PT_PTE)
413 ptl = lock_pte(page);
414
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700415 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
416 pfn_pte(pfn, PAGE_KERNEL_RO),
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700417 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
418
419 if (level == PT_PTE)
420 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
421
422 if (ptl) {
423 /* Queue a deferred unlock for when this batch
424 is completed. */
425 xen_mc_callback(do_unlock, ptl);
426 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700427 }
428
429 return flush;
430}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	unsigned level;

	xen_mc_batch();

	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		xen_mc_batch();
	}

#ifdef CONFIG_X86_PAE
	level = MMUEXT_PIN_L3_TABLE;
#else
	level = MMUEXT_PIN_L2_TABLE;
#endif

	xen_do_pin(level, PFN_DOWN(__pa(pgd)));

	xen_mc_issue(0);
}

/* The init_mm pagetable is really pinned as soon as it's created, but
   that's before we have page structures to store the bits.  So do all
   the book-keeping now. */
static __init int mark_pinned(struct page *page, enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}

static int unpin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		if (level == PT_PTE) {
			ptl = lock_pte(page);

			xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pgd_walk(pgd, unpin_page, TASK_SIZE);

	xen_mc_issue(0);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next->pgd);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;

	if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void drop_mm_ref(struct mm_struct *mm)
{
	cpumask_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	mask = mm->cpu_vm_mask;

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpu_set(cpu, mask);
	}

	if (!cpus_empty(mask))
		xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (PagePinned(virt_to_page(mm->pgd)))
		xen_pgd_unpin(mm->pgd);

	spin_unlock(&mm->page_table_lock);
}