/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"

#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

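/*
 * The p2m table is a simple two-level structure: p2m_top is a
 * page-sized array of pointers to pages of p2m entries.  A pfn is
 * looked up by indexing p2m_top with its upper bits and the resulting
 * leaf page with its lower bits (see p2m_top_index() and p2m_index()
 * below).  Unpopulated ranges all share the p2m_missing page.
 */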
/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE]
	__attribute__((section(".data.page_aligned"))) =
		{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES]
	__attribute__((section(".data.page_aligned"))) =
		{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES]
	__attribute__((section(".bss.page_aligned")));

static unsigned long p2m_top_mfn_list[
			PAGE_ALIGN(TOP_ENTRIES / P2M_ENTRIES_PER_PAGE)]
	__attribute__((section(".bss.page_aligned")));

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}

/* Build the parallel p2m_top_mfn structures */
void xen_setup_mfn_list_list(void)
{
	unsigned pfn, idx;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}
}

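/* Look up the mfn for a pfn.  Out-of-range pfns return
   INVALID_P2M_ENTRY; unpopulated holes hit the all-~0UL
   p2m_missing page. */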
unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);

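/* Replace the p2m_missing placeholder at *pp with a freshly
   allocated leaf page.  cmpxchg guards against a racing allocation;
   the loser just frees its page, while the winner also records the
   new page's mfn for the save/restore structures. */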
static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
{
	unsigned long *p;
	unsigned i;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
		free_page((unsigned long)p);
	else
		*mfnp = virt_to_mfn(p);
}

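/* Record that pfn maps to mfn.  Auto-translated guests keep an
   identity p2m, so only sanity-check the arguments there; otherwise
   allocate the leaf page on first use of a hole. */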
void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		/* no need to allocate a page to store an invalid entry */
		if (mfn == INVALID_P2M_ENTRY)
			return;
		alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]);
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;
}

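/* Translate a kernel virtual address into a machine address by
   looking up its pte in the current pagetable. */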
xmaddr_t arbitrary_virt_to_machine(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);
	unsigned offset = address & ~PAGE_MASK;

	BUG_ON(pte == NULL);

	return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

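/* Change the protection of a directly-mapped lowmem page in place,
   via HYPERVISOR_update_va_mapping so Xen can validate the change. */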
void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

static bool page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

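/* Update a pmd entry with an explicit (lazily batched) mmu_update
   hypercall.  This works even when the pmd page is pinned and hence
   mapped read-only. */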
void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pmd_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	/* If the page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <mfn,flags> stored as-is, to permit clearing entries */
	xen_set_pte(pte, mfn_pte(mfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

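/* Set a pte.  Mappings in the current or kernel pagetable can go via
   the cheaper update_va_mapping interface, batched when we're in lazy
   MMU mode; anything else falls back to a plain xen_set_pte(). */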
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
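/* Strip the mfn out of a present pagetable entry and replace it with
   the corresponding pfn (and vice versa), preserving the flag bits
   outside PTE_MASK. */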
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = (mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = (pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	return pte_mfn_to_pfn(pte.pte);
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}

pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pud_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	/* If the page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

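/* A PAE pte is written as two 32-bit halves.  Writing the high word
   first means a previously-cleared entry cannot appear present until
   both halves are in place; entries that may be live are updated with
   the atomic 64-bit store in xen_set_pte_atomic() below. */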
void xen_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, pte_val_ma(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}

/*
  (Yet another) pagetable walker.  This one is intended for pinning a
  pagetable.  This means that it walks a pagetable and calls the
  callback function on each page it finds making up the page table,
  at every level.  It walks the entire pagetable, but it only bothers
  pinning pte pages which are below pte_limit.  In the normal case
  this will be TASK_SIZE, but at boot we need to pin up to
  FIXADDR_TOP.  But the important bit is that we don't pin beyond
  there, because then we start getting into Xen's ptes.
*/
static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, enum pt_level),
		    unsigned long limit)
{
	pgd_t *pgd = pgd_base;
	int flush = 0;
	unsigned long addr = 0;
	unsigned long pgd_next;

	BUG_ON(limit > FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
		pud_t *pud;
		unsigned long pud_limit, pud_next;

		pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);

		if (!pgd_val(*pgd))
			continue;

		pud = pud_offset(pgd, 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(virt_to_page(pud), PT_PUD);

		for (; addr != pud_limit; pud++, addr = pud_next) {
			pmd_t *pmd;
			unsigned long pmd_limit;

			pud_next = pud_addr_end(addr, pud_limit);

			if (pud_next < limit)
				pmd_limit = pud_next;
			else
				pmd_limit = limit;

			if (pud_none(*pud))
				continue;

			pmd = pmd_offset(pud, 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(virt_to_page(pmd), PT_PMD);

			for (; addr != pmd_limit; pmd++) {
				addr += (PAGE_SIZE * PTRS_PER_PTE);
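				/* compare with a -1 bias so the test
				   stays correct even when addr wraps
				   to 0 at the top of the address
				   space */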
				if ((pmd_limit-1) < (addr-1)) {
					addr = pmd_limit;
					break;
				}

				if (pmd_none(*pmd))
					continue;

				flush |= (*func)(pmd_page(*pmd), PT_PTE);
			}
		}
	}

	flush |= (*func)(virt_to_page(pgd_base), PT_PGD);

	return flush;
}

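/* With split pte locks, take the lock protecting a pte page so its
   contents can't change while we (un)pin it; do_unlock() is handed to
   xen_mc_callback() so the release happens only once the multicall
   batch has completed. */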
static spinlock_t *lock_pte(struct page *page)
{
	spinlock_t *ptl = NULL;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	ptl = __pte_lockptr(page);
	spin_lock(ptl);
#endif

	return ptl;
}

static void do_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

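/* Queue a pin/unpin mmuext op for the given frame in the current
   multicall batch. */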
static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

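/* Make a single pagetable page read-only in preparation for pinning.
   Returns nonzero if the caller needs to flush kmaps first (an
   unpinned highmem page was found). */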
static int pin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		ptl = NULL;
		if (level == PT_PTE)
			ptl = lock_pte(page);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (level == PT_PTE)
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

		if (ptl) {
			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	xen_mc_batch();

	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		xen_mc_batch();
	}

	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
	xen_mc_issue(0);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			xen_pgd_pin((pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/* The init_mm pagetable is really pinned as soon as it's created, but
   that's before we have page structures to store the bits.  So do all
   the book-keeping now. */
static __init int mark_pinned(struct page *page, enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}

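/* Undo pin_page: make a pagetable page writable again.  pte pages
   drop their L1 pin under the pte lock, with the unlock deferred
   until the batch completes. */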
static int unpin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		if (level == PT_PTE) {
			ptl = lock_pte(page);

			xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pgd_walk(pgd, unpin_page, TASK_SIZE);

	xen_mc_issue(0);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			printk("unpinning pinned %p\n", page_address(page));
			xen_pgd_unpin((pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

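/* Pin a pagetable when it first comes into use: both when an mm is
   activated and when one is duplicated at fork. */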
void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next->pgd);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}

#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;

	if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void drop_mm_ref(struct mm_struct *mm)
{
	cpumask_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	mask = mm->cpu_vm_mask;

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpu_set(cpu, mask);
	}

	if (!cpus_empty(mask))
		xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces them to be read-only, and it controls all updates
 * to them.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (page_pinned(mm->pgd))
		xen_pgd_unpin(mm->pgd);

	spin_unlock(&mm->page_table_lock);
}