/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion. In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable. When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest. This prevents uncontrolled
 * guest updates to the pagetable. Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow. The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use. This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>

#include <trace/events/xen.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
#include <asm/smp.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

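/*
 * Illustrative sketch of the pfn<->mfn translation described in the
 * file header above. Not part of the original file; the example_*
 * name is hypothetical, but pfn_to_mfn()/mfn_to_pfn() are the real
 * helpers from <xen/page.h>.
 */
#if 0
static unsigned long example_pfn_round_trip(unsigned long pfn)
{
	/* guest "physical" pfn -> real machine frame number */
	unsigned long mfn = pfn_to_mfn(pfn);

	/* the m2p table maps a local machine frame back to its pfn */
	return mfn_to_pfn(mfn);
}
#endif
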
/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

#ifdef CONFIG_X86_32
/*
 * Identity map, in addition to plain kernel map. This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
#endif
#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3. This may not be the current effective cr3, because
 * its update may be being lazily deferred. However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early). If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */

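/*
 * Illustrative sketch (not in the original file): per the note above,
 * a vcpu inspecting another vcpu's pagetable base should read
 * xen_current_cr3, since xen_cr3 may still be lazily deferred. The
 * example_* name is hypothetical.
 */
#if 0
static unsigned long example_remote_cr3(int cpu)
{
	return per_cpu(xen_current_cr3, cpu);	/* committed cr3, as physaddr */
}
#endif
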
/*
 * Just beyond the highest usermode address. STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

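/*
 * Illustrative usage (not in the original file): translating an
 * address outside the linear map, e.g. from vmalloc space, where the
 * quick virt_to_machine() lookup does not apply and the full
 * page-table walk above is taken. The example_* name is hypothetical.
 */
#if 0
static void example_translate(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	printk(KERN_DEBUG "va %p -> ma %llx\n",
	       vaddr, (unsigned long long)maddr.maddr);
}
#endif
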
void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

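/*
 * Illustrative usage (not in the original file): per the rules in the
 * file header, a page must be made RO before Xen will accept it as a
 * pagetable page, and is made RW again only once it is no longer used
 * as one. The example_* name is hypothetical.
 */
#if 0
static void example_pt_page_lifecycle(void *pt_page)
{
	make_lowmem_page_readonly(pt_page);	/* required before pinning */
	/* ... page is installed and used as a pagetable ... */
	make_lowmem_page_readwrite(pt_page);	/* only after unpinning */
}
#endif
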
static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}

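/*
 * Illustrative sketch (not in the original file) of the batching
 * pattern used throughout this file: updates issued between
 * xen_mc_batch() and xen_mc_issue() are coalesced, and
 * xen_extend_mmu_update() above appends to an already-open multicall
 * when it can, so N updates can cost far fewer than N hypercalls.
 * The example_* name is hypothetical.
 */
#if 0
static void example_batched_updates(pte_t *ptep, pte_t *ptevals, int n)
{
	struct mmu_update u;
	int i;

	xen_mc_batch();

	for (i = 0; i < n; i++) {
		u.ptr = virt_to_machine(ptep + i).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(ptevals[i]);
		xen_extend_mmu_update(&u);	/* queued, not yet issued */
	}

	xen_mc_issue(PARAVIRT_LAZY_MMU);	/* flushed unless in lazy mode */
}
#endif
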
static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval)) {
		/*
		 * Could call native_set_pte() here and trap and
		 * emulate the PTE write but with 32-bit guests this
		 * needs two traps (one for each of the two 32-bit
		 * words in the PTE) so do one hypercall directly
		 * instead.
		 */
		struct mmu_update u;

		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(pteval);
		HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
	}
}

static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is. We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		unsigned long pfn = mfn_to_pfn(mfn);

		pteval_t flags = val & PTE_FLAGS_MASK;
		if (unlikely(pfn == ~0))
			val = flags & ~_PAGE_PRESENT;
		else
			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = get_phys_to_machine(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte. Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else {
			/*
			 * It is paramount to do this test _after_ the
			 * INVALID_P2M_ENTRY check, as INVALID_P2M_ENTRY &
			 * IDENTITY_FRAME_BIT resolves to true.
			 */
			mfn &= ~FOREIGN_FRAME_BIT;
			if (mfn & IDENTITY_FRAME_BIT) {
				mfn &= ~IDENTITY_FRAME_BIT;
				flags |= _PAGE_IOMAP;
			}
		}
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}

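/*
 * Illustrative sketch (not in the original file) of the asymmetry
 * noted above: a pfn with no mfn becomes an empty non-present pte,
 * so converting back cannot recover the original pfn. The example_*
 * name and the sample pfn are hypothetical.
 */
#if 0
static void example_asymmetry(void)
{
	pteval_t val = ((pteval_t)0x1234 << PAGE_SHIFT) | _PAGE_PRESENT;

	/* if pfn 0x1234 has no mfn, this yields 0 (empty, non-present) */
	val = pte_pfn_to_mfn(val);

	/* ...and the round trip does not restore the original value */
	val = pte_mfn_to_pfn(val);
}
#endif
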
static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		/* We assume the pte frame number is a MFN, so
		   just use it as-is. */
		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;
#if 0
	/* If this is a WC pte, convert back from Xen WC to Linux WC */
	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
		WARN_ON(!pat_enabled);
		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
	}
#endif
	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
		return pteval;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

static pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

/*
 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
 * are reserved for now, to correspond to the Intel-reserved PAT
 * types.
 *
 * We expect Linux's PAT set as follows:
 *
 * Idx  PTE flags        Linux    Xen    Default
 *  0                    WB       WB     WB
 *  1            PWT     WC       WT     WT
 *  2        PCD         UC-      UC-    UC-
 *  3        PCD PWT     UC       UC     UC
 *  4    PAT             WB       WC     WB
 *  5    PAT     PWT     WC       WP     WT
 *  6    PAT PCD         UC-      UC     UC-
 *  7    PAT PCD PWT     UC       UC     UC
 */

void xen_set_pat(u64 pat)
{
	/* We expect Linux to use a PAT setting of
	 * UC UC- WC WB (ignoring the PAT flag) */
	WARN_ON(pat != 0x0007010600070106ull);
}

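/*
 * Illustrative sketch (not in the original file): the Idx column in
 * the table above is just the three pte cache-control flags read as
 * a 3-bit number, PAT:PCD:PWT. The example_* name is hypothetical.
 */
#if 0
static unsigned example_pat_index(pteval_t pte)
{
	return ((pte & _PAGE_PAT) ? 4 : 0) |
	       ((pte & _PAGE_PCD) ? 2 : 0) |
	       ((pte & _PAGE_PWT) ? 1 : 0);
}
#endif
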
static pte_t xen_make_pte(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);
#if 0
	/* If Linux is trying to set a WC pte, then map to the Xen WC.
	 * If _PAGE_PAT is set, then it probably means it is really
	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
	 * things work out OK...
	 *
	 * (We should never see kernel mappings with _PAGE_PSE set,
	 * but we could see hugetlbfs mappings, I think.).
	 */
	if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
	}
#endif
	/*
	 * Unprivileged domains are allowed to do IOMAPpings for
	 * PCI passthrough, but not map ISA space. The ISA
	 * mappings are just dummy local mappings to keep other
	 * parts of the kernel happy.
	 */
	if (unlikely(pte & _PAGE_IOMAP) &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
	} else {
		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
	}

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

static pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

static pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
	trace_xen_mmu_set_pud(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	trace_xen_mmu_set_pte_atomic(ptep, pte);
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	trace_xen_mmu_pte_clear(mm, addr, ptep);
	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
		native_pte_clear(mm, addr, ptep);
}

static void xen_pmd_clear(pmd_t *pmdp)
{
	trace_xen_mmu_pmd_clear(pmdp);
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

static pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
static pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

static pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure. This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	trace_xen_mmu_set_pgd(ptr, user_ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker. This one is intended for pinning a
 * pagetable. This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level. It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit. In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings. On 32-bit these
	 * will end up making a zero-sized hole, and so this is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

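/*
 * Illustrative usage (not in the original file): a walker callback in
 * the style of xen_mark_pinned() below. The example_* names are
 * hypothetical; the real callers pass xen_pin_page()/xen_unpin_page().
 */
#if 0
static int example_visit_page(struct mm_struct *mm, struct page *page,
			      enum pt_level level)
{
	/* called once per pagetable page, at every level */
	return 0;	/* non-zero would request a tlb flush */
}

static void example_walk(struct mm_struct *mm)
{
	xen_pgd_walk(mm, example_visit_page, STACK_TOP_MAX);
}
#endif
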
/* If we're using split pte locks, then take the page's lock and
   return a pointer to it. Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);

	xen_extend_mmuext_op(&op);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it. If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits). The solution is to mark RO and pin each PTE
		 * page while holding the lock. This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet. We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_pin(mm, pgd);

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns. Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits. So do all
 * the book-keeping now.
 */
static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page. If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_unpin(mm, pgd);

	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}

#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls. In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it. This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing. This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
static void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

Attilio Raoc7112882012-08-21 21:22:40 +01001178static void xen_post_allocator_init(void);
1179
Stefano Stabellini279b7062011-04-14 15:49:41 +01001180static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
1181{
1182 /* reserve the range used */
1183 native_pagetable_reserve(start, end);
1184
1185 /* set as RW the rest */
1186 printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
1187 PFN_PHYS(pgt_buf_top));
1188 while (end < PFN_PHYS(pgt_buf_top)) {
1189 make_lowmem_page_readwrite(__va(end));
1190 end += PAGE_SIZE;
1191 }
1192}
1193
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001194#ifdef CONFIG_X86_64
1195static void __init xen_cleanhighmap(unsigned long vaddr,
1196 unsigned long vaddr_end)
1197{
1198 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1199 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1200
1201 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1202 * We include the PMD passed in on _both_ boundaries. */
1203 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
1204 pmd++, vaddr += PMD_SIZE) {
1205 if (pmd_none(*pmd))
1206 continue;
1207 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1208 set_pmd(pmd, __pmd(0));
1209 }
1210 /* In case we did something silly, we should crash in this function
1211 * instead of somewhere later and be confusing. */
1212 xen_mc_flush();
1213}
1214#endif
Konrad Rzeszutek Wilk98104c32012-09-12 11:16:27 -04001215static void __init xen_pagetable_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001216{
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001217#ifdef CONFIG_X86_64
1218 unsigned long size;
1219 unsigned long addr;
1220#endif
Konrad Rzeszutek Wilk98104c32012-09-12 11:16:27 -04001221 paging_init();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001222 xen_setup_shared_info();
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001223#ifdef CONFIG_X86_64
1224 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1225 unsigned long new_mfn_list;
1226
1227 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1228
1229 /* On 32-bit, we get zero so this never gets executed. */
1230 new_mfn_list = xen_revector_p2m_tree();
1231 if (new_mfn_list && new_mfn_list != xen_start_info->mfn_list) {
 1232			/* using the __ka address and filling it with INVALID_P2M_ENTRY! */
1233 memset((void *)xen_start_info->mfn_list, 0xff, size);
1234
1235 /* We should be in __ka space. */
1236 BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
1237 addr = xen_start_info->mfn_list;
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001238			/* We round up to the PMD, which means that if anybody at this stage is
 1239			 * using the __ka address of xen_start_info or xen_start_info->shared_info
 1240			 * they are going to crash. Fortunately we have already revectored
 1241			 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
1242 size = roundup(size, PMD_SIZE);
1243 xen_cleanhighmap(addr, addr + size);
1244
Konrad Rzeszutek Wilk785f6232012-08-14 16:37:31 -04001245 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001246 memblock_free(__pa(xen_start_info->mfn_list), size);
1247 /* And revector! Bye bye old array */
1248 xen_start_info->mfn_list = new_mfn_list;
Konrad Rzeszutek Wilk32873182012-08-17 09:35:31 -04001249 } else
1250 goto skip;
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001251 }
Konrad Rzeszutek Wilk3aca7fb2012-08-14 14:34:00 -04001252 /* At this stage, cleanup_highmap has already cleaned __ka space
1253 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1254 * the ramdisk). We continue on, erasing PMD entries that point to page
1255 * tables - do note that they are accessible at this stage via __va.
1256 * For good measure we also round up to the PMD - which means that if
 1257	 * anybody is using a __ka address to the initial boot-stack and tries
 1258	 * to use it, they are going to crash. The xen_start_info has been
1259 * taken care of already in xen_setup_kernel_pagetable. */
1260 addr = xen_start_info->pt_base;
1261 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1262
1263 xen_cleanhighmap(addr, addr + size);
1264 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1265#ifdef DEBUG
 1266	/* This is superfluous and not necessary, but you know what,
 1267	 * let's do it. The MODULES_VADDR -> MODULES_END range should be
 1268	 * clear of anything at this stage. */
1269 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1270#endif
Konrad Rzeszutek Wilk32873182012-08-17 09:35:31 -04001271skip:
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001272#endif
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001273 xen_post_allocator_init();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001274}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001275static void xen_write_cr2(unsigned long cr2)
1276{
Alex Shi2113f462012-01-13 23:53:35 +08001277 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001278}
1279
1280static unsigned long xen_read_cr2(void)
1281{
Alex Shi2113f462012-01-13 23:53:35 +08001282 return this_cpu_read(xen_vcpu)->arch.cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001283}
1284
1285unsigned long xen_read_cr2_direct(void)
1286{
Alex Shi2113f462012-01-13 23:53:35 +08001287 return this_cpu_read(xen_vcpu_info.arch.cr2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001288}
1289
1290static void xen_flush_tlb(void)
1291{
1292 struct mmuext_op *op;
1293 struct multicall_space mcs;
1294
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001295 trace_xen_mmu_flush_tlb(0);
1296
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001297 preempt_disable();
1298
1299 mcs = xen_mc_entry(sizeof(*op));
1300
1301 op = mcs.args;
1302 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1303 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1304
1305 xen_mc_issue(PARAVIRT_LAZY_MMU);
1306
1307 preempt_enable();
1308}
1309
1310static void xen_flush_tlb_single(unsigned long addr)
1311{
1312 struct mmuext_op *op;
1313 struct multicall_space mcs;
1314
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001315 trace_xen_mmu_flush_tlb_single(addr);
1316
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001317 preempt_disable();
1318
1319 mcs = xen_mc_entry(sizeof(*op));
1320 op = mcs.args;
1321 op->cmd = MMUEXT_INVLPG_LOCAL;
1322 op->arg1.linear_addr = addr & PAGE_MASK;
1323 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1324
1325 xen_mc_issue(PARAVIRT_LAZY_MMU);
1326
1327 preempt_enable();
1328}
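/*
 * Illustrative sketch, not part of the upstream file: each of the two
 * flush helpers above queues a single mmuext op, but the multicall
 * machinery lets several ops share one hypercall.  A hypothetical
 * helper that invalidates an array of addresses in one batch (using
 * only calls already present in this file) could look like this:
 */
static void __maybe_unused xen_flush_tlb_pages(const unsigned long *addrs,
					       unsigned int count)
{
	struct mmuext_op *op;
	struct multicall_space mcs;
	unsigned int i;

	preempt_disable();

	for (i = 0; i < count; i++) {
		/* Queue one INVLPG per address; nothing is issued yet. */
		mcs = xen_mc_entry(sizeof(*op));
		op = mcs.args;
		op->cmd = MMUEXT_INVLPG_LOCAL;
		op->arg1.linear_addr = addrs[i] & PAGE_MASK;
		MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
	}

	/* All queued INVLPGs reach Xen in as few hypercalls as possible. */
	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}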
1329
1330static void xen_flush_tlb_others(const struct cpumask *cpus,
Alex Shie7b52ff2012-06-28 09:02:17 +08001331 struct mm_struct *mm, unsigned long start,
1332 unsigned long end)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001333{
1334 struct {
1335 struct mmuext_op op;
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001336#ifdef CONFIG_SMP
Andrew Jones900cba82009-12-18 10:31:31 +01001337 DECLARE_BITMAP(mask, num_processors);
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001338#else
1339 DECLARE_BITMAP(mask, NR_CPUS);
1340#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001341 } *args;
1342 struct multicall_space mcs;
1343
Alex Shie7b52ff2012-06-28 09:02:17 +08001344 trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001345
Jeremy Fitzhardingee3f8a742009-03-04 17:36:57 -08001346 if (cpumask_empty(cpus))
1347 return; /* nothing to do */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001348
1349 mcs = xen_mc_entry(sizeof(*args));
1350 args = mcs.args;
1351 args->op.arg2.vcpumask = to_cpumask(args->mask);
1352
 1353	/* Remove us, and any offline CPUs. */
1354 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1355 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001356
Alex Shie7b52ff2012-06-28 09:02:17 +08001357 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
Alex Shice7184b2012-08-24 08:55:13 +00001358 if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001359 args->op.cmd = MMUEXT_INVLPG_MULTI;
Alex Shie7b52ff2012-06-28 09:02:17 +08001360 args->op.arg1.linear_addr = start;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001361 }
1362
1363 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1364
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001365 xen_mc_issue(PARAVIRT_LAZY_MMU);
1366}
1367
1368static unsigned long xen_read_cr3(void)
1369{
Alex Shi2113f462012-01-13 23:53:35 +08001370 return this_cpu_read(xen_cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001371}
1372
1373static void set_current_cr3(void *v)
1374{
Alex Shi2113f462012-01-13 23:53:35 +08001375 this_cpu_write(xen_current_cr3, (unsigned long)v);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001376}
1377
1378static void __xen_write_cr3(bool kernel, unsigned long cr3)
1379{
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001380 struct mmuext_op op;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001381 unsigned long mfn;
1382
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001383 trace_xen_mmu_write_cr3(kernel, cr3);
1384
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001385 if (cr3)
1386 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1387 else
1388 mfn = 0;
1389
1390 WARN_ON(mfn == 0 && kernel);
1391
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001392 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1393 op.arg1.mfn = mfn;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001394
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001395 xen_extend_mmuext_op(&op);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001396
1397 if (kernel) {
Alex Shi2113f462012-01-13 23:53:35 +08001398 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001399
1400 /* Update xen_current_cr3 once the batch has actually
1401 been submitted. */
1402 xen_mc_callback(set_current_cr3, (void *)cr3);
1403 }
1404}
1405
1406static void xen_write_cr3(unsigned long cr3)
1407{
1408 BUG_ON(preemptible());
1409
1410 xen_mc_batch(); /* disables interrupts */
1411
 1412	/* Update while interrupts are disabled, so it's atomic with
 1413	   respect to IPIs */
Alex Shi2113f462012-01-13 23:53:35 +08001414 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001415
1416 __xen_write_cr3(true, cr3);
1417
1418#ifdef CONFIG_X86_64
1419 {
1420 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1421 if (user_pgd)
1422 __xen_write_cr3(false, __pa(user_pgd));
1423 else
1424 __xen_write_cr3(false, 0);
1425 }
1426#endif
1427
1428 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1429}
1430
1431static int xen_pgd_alloc(struct mm_struct *mm)
1432{
1433 pgd_t *pgd = mm->pgd;
1434 int ret = 0;
1435
1436 BUG_ON(PagePinned(virt_to_page(pgd)));
1437
1438#ifdef CONFIG_X86_64
1439 {
1440 struct page *page = virt_to_page(pgd);
1441 pgd_t *user_pgd;
1442
1443 BUG_ON(page->private != 0);
1444
1445 ret = -ENOMEM;
1446
1447 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1448 page->private = (unsigned long)user_pgd;
1449
1450 if (user_pgd != NULL) {
1451 user_pgd[pgd_index(VSYSCALL_START)] =
1452 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1453 ret = 0;
1454 }
1455
1456 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1457 }
1458#endif
1459
1460 return ret;
1461}
1462
1463static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1464{
1465#ifdef CONFIG_X86_64
1466 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1467
1468 if (user_pgd)
1469 free_page((unsigned long)user_pgd);
1470#endif
1471}
1472
Stefano Stabelliniee176452011-04-19 14:47:31 +01001473#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001474static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001475{
1476 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1477 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1478 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1479 pte_val_ma(pte));
Stefano Stabelliniee176452011-04-19 14:47:31 +01001480
1481 return pte;
1482}
1483#else /* CONFIG_X86_64 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001484static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Stefano Stabelliniee176452011-04-19 14:47:31 +01001485{
1486 unsigned long pfn = pte_pfn(pte);
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001487
1488 /*
1489 * If the new pfn is within the range of the newly allocated
1490 * kernel pagetable, and it isn't being mapped into an
Stefano Stabellinid8aa5ec2011-03-09 14:22:05 +00001491 * early_ioremap fixmap slot as a freshly allocated page, make sure
1492 * it is RO.
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001493 */
Stefano Stabellinid8aa5ec2011-03-09 14:22:05 +00001494 if (((!is_early_ioremap_ptep(ptep) &&
Stefano Stabellinib9269dc2011-04-12 12:19:49 +01001495 pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
Stefano Stabellinid8aa5ec2011-03-09 14:22:05 +00001496 (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001497 pte = pte_wrprotect(pte);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001498
1499 return pte;
1500}
Stefano Stabelliniee176452011-04-19 14:47:31 +01001501#endif /* CONFIG_X86_64 */
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001502
David Vrabeld095d432012-07-09 11:39:05 +01001503/*
1504 * Init-time set_pte while constructing initial pagetables, which
1505 * doesn't allow RO page table pages to be remapped RW.
1506 *
David Vrabel66a27dd2012-07-09 11:39:06 +01001507 * If there is no MFN for this PFN then this page is initially
1508 * ballooned out so clear the PTE (as in decrease_reservation() in
1509 * drivers/xen/balloon.c).
1510 *
David Vrabeld095d432012-07-09 11:39:05 +01001511 * Many of these PTE updates are done on unpinned and writable pages
1512 * and doing a hypercall for these is unnecessary and expensive. At
1513 * this point it is not possible to tell if a page is pinned or not,
1514 * so always write the PTE directly and rely on Xen trapping and
1515 * emulating any updates as necessary.
1516 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001517static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001518{
David Vrabel66a27dd2012-07-09 11:39:06 +01001519 if (pte_mfn(pte) != INVALID_P2M_ENTRY)
1520 pte = mask_rw_pte(ptep, pte);
1521 else
1522 pte = __pte_ma(0);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001523
David Vrabeld095d432012-07-09 11:39:05 +01001524 native_set_pte(ptep, pte);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001525}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001526
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001527static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1528{
1529 struct mmuext_op op;
1530 op.cmd = cmd;
1531 op.arg1.mfn = pfn_to_mfn(pfn);
1532 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1533 BUG();
1534}
1535
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001536/* Early in boot, while setting up the initial pagetable, assume
1537 everything is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001538static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001539{
1540#ifdef CONFIG_FLATMEM
1541 BUG_ON(mem_map); /* should only be used early */
1542#endif
1543 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001544 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1545}
1546
1547/* Used for pmd and pud */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001548static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001549{
1550#ifdef CONFIG_FLATMEM
1551 BUG_ON(mem_map); /* should only be used early */
1552#endif
1553 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001554}
1555
1556/* Early release_pte assumes that all pts are pinned, since there's
1557 only init_mm and anything attached to that is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001558static void __init xen_release_pte_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001559{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001560 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001561 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1562}
1563
Daniel Kiper3f5089532011-05-12 17:19:53 -04001564static void __init xen_release_pmd_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001565{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001566 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001567}
1568
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001569static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1570{
1571 struct multicall_space mcs;
1572 struct mmuext_op *op;
1573
1574 mcs = __xen_mc_entry(sizeof(*op));
1575 op = mcs.args;
1576 op->cmd = cmd;
1577 op->arg1.mfn = pfn_to_mfn(pfn);
1578
1579 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1580}
1581
1582static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1583{
1584 struct multicall_space mcs;
1585 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1586
1587 mcs = __xen_mc_entry(0);
1588 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1589 pfn_pte(pfn, prot), 0);
1590}
1591
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001592/* This needs to make sure the new pte page is pinned iff it's being
1593 attached to a pinned pagetable. */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001594static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1595 unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001596{
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001597 bool pinned = PagePinned(virt_to_page(mm->pgd));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001598
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001599 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001600
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001601 if (pinned) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001602 struct page *page = pfn_to_page(pfn);
1603
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001604 SetPagePinned(page);
1605
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001606 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001607 xen_mc_batch();
1608
1609 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1610
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001611 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001612 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1613
1614 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001615 } else {
1616 /* make sure there are no stray mappings of
1617 this page */
1618 kmap_flush_unused();
1619 }
1620 }
1621}
1622
1623static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1624{
1625 xen_alloc_ptpage(mm, pfn, PT_PTE);
1626}
1627
1628static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1629{
1630 xen_alloc_ptpage(mm, pfn, PT_PMD);
1631}
1632
1633/* This should never happen until we're OK to use struct page */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001634static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001635{
1636 struct page *page = pfn_to_page(pfn);
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001637 bool pinned = PagePinned(page);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001638
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001639 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1640
1641 if (pinned) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001642 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001643 xen_mc_batch();
1644
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001645 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001646 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1647
1648 __set_pfn_prot(pfn, PAGE_KERNEL);
1649
1650 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001651 }
1652 ClearPagePinned(page);
1653 }
1654}
1655
1656static void xen_release_pte(unsigned long pfn)
1657{
1658 xen_release_ptpage(pfn, PT_PTE);
1659}
1660
1661static void xen_release_pmd(unsigned long pfn)
1662{
1663 xen_release_ptpage(pfn, PT_PMD);
1664}
1665
1666#if PAGETABLE_LEVELS == 4
1667static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1668{
1669 xen_alloc_ptpage(mm, pfn, PT_PUD);
1670}
1671
1672static void xen_release_pud(unsigned long pfn)
1673{
1674 xen_release_ptpage(pfn, PT_PUD);
1675}
1676#endif
1677
1678void __init xen_reserve_top(void)
1679{
1680#ifdef CONFIG_X86_32
1681 unsigned long top = HYPERVISOR_VIRT_START;
1682 struct xen_platform_parameters pp;
1683
1684 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1685 top = pp.virt_start;
1686
1687 reserve_top_address(-top);
1688#endif /* CONFIG_X86_32 */
1689}
1690
1691/*
 1692 * Like __va(), but returns the address in the kernel mapping (which is
 1693 * all we have until the physical memory mapping has been set up).
1694 */
1695static void *__ka(phys_addr_t paddr)
1696{
1697#ifdef CONFIG_X86_64
1698 return (void *)(paddr + __START_KERNEL_map);
1699#else
1700 return __va(paddr);
1701#endif
1702}
1703
1704/* Convert a machine address to physical address */
1705static unsigned long m2p(phys_addr_t maddr)
1706{
1707 phys_addr_t paddr;
1708
1709 maddr &= PTE_PFN_MASK;
1710 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1711
1712 return paddr;
1713}
1714
1715/* Convert a machine address to kernel virtual */
1716static void *m2v(phys_addr_t maddr)
1717{
1718 return __ka(m2p(maddr));
1719}
1720
Juan Quintela4ec53872010-09-02 15:45:43 +01001721/* Set the page permissions on identity-mapped pages */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001722static void set_page_prot(void *addr, pgprot_t prot)
1723{
1724 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1725 pte_t pte = pfn_pte(pfn, prot);
1726
1727 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1728 BUG();
1729}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001730#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001731static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001732{
1733 unsigned pmdidx, pteidx;
1734 unsigned ident_pte;
1735 unsigned long pfn;
1736
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001737 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1738 PAGE_SIZE);
1739
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001740 ident_pte = 0;
1741 pfn = 0;
1742 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1743 pte_t *pte_page;
1744
1745 /* Reuse or allocate a page of ptes */
1746 if (pmd_present(pmd[pmdidx]))
1747 pte_page = m2v(pmd[pmdidx].pmd);
1748 else {
1749 /* Check for free pte pages */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001750 if (ident_pte == LEVEL1_IDENT_ENTRIES)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001751 break;
1752
1753 pte_page = &level1_ident_pgt[ident_pte];
1754 ident_pte += PTRS_PER_PTE;
1755
1756 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1757 }
1758
1759 /* Install mappings */
1760 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1761 pte_t pte;
1762
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001763#ifdef CONFIG_X86_32
1764 if (pfn > max_pfn_mapped)
1765 max_pfn_mapped = pfn;
1766#endif
1767
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001768 if (!pte_none(pte_page[pteidx]))
1769 continue;
1770
1771 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1772 pte_page[pteidx] = pte;
1773 }
1774 }
1775
1776 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1777 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1778
1779 set_page_prot(pmd, PAGE_KERNEL_RO);
1780}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001781#endif
Ian Campbell7e775062010-09-30 12:37:26 +01001782void __init xen_setup_machphys_mapping(void)
1783{
1784 struct xen_machphys_mapping mapping;
Ian Campbell7e775062010-09-30 12:37:26 +01001785
1786 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1787 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
Jan Beulichccbcdf72011-08-16 15:07:41 +01001788 machine_to_phys_nr = mapping.max_mfn + 1;
Ian Campbell7e775062010-09-30 12:37:26 +01001789 } else {
Jan Beulichccbcdf72011-08-16 15:07:41 +01001790 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
Ian Campbell7e775062010-09-30 12:37:26 +01001791 }
Jan Beulichccbcdf72011-08-16 15:07:41 +01001792#ifdef CONFIG_X86_32
Jan Beulich61cca2f2011-09-15 08:52:40 +01001793 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1794 < machine_to_phys_mapping);
Jan Beulichccbcdf72011-08-16 15:07:41 +01001795#endif
Ian Campbell7e775062010-09-30 12:37:26 +01001796}
1797
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001798#ifdef CONFIG_X86_64
1799static void convert_pfn_mfn(void *v)
1800{
1801 pte_t *pte = v;
1802 int i;
1803
1804 /* All levels are converted the same way, so just treat them
1805 as ptes. */
1806 for (i = 0; i < PTRS_PER_PTE; i++)
1807 pte[i] = xen_make_pte(pte[i].pte);
1808}
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001809static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1810 unsigned long addr)
1811{
1812 if (*pt_base == PFN_DOWN(__pa(addr))) {
1813 set_page_prot((void *)addr, PAGE_KERNEL);
1814 clear_page((void *)addr);
1815 (*pt_base)++;
1816 }
1817 if (*pt_end == PFN_DOWN(__pa(addr))) {
1818 set_page_prot((void *)addr, PAGE_KERNEL);
1819 clear_page((void *)addr);
1820 (*pt_end)--;
1821 }
1822}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001823/*
Lucas De Marchi0d2eb442011-03-17 16:24:16 -03001824 * Set up the initial kernel pagetable.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001825 *
1826 * We can construct this by grafting the Xen provided pagetable into
1827 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1828 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1829 * means that only the kernel has a physical mapping to start with -
1830 * but that's enough to get __va working. We need to fill in the rest
1831 * of the physical mapping once some sort of allocator has been set
1832 * up.
1833 */
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04001834void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001835{
1836 pud_t *l3;
1837 pmd_t *l2;
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001838 unsigned long addr[3];
1839 unsigned long pt_base, pt_end;
1840 unsigned i;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001841
Stefano Stabellini14988a42011-02-18 11:32:40 +00001842 /* max_pfn_mapped is the last pfn mapped in the initial memory
1843 * mappings. Considering that on Xen after the kernel mappings we
1844 * have the mappings of some pages that don't exist in pfn space, we
1845 * set max_pfn_mapped to the last real pfn mapped. */
1846 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1847
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001848 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1849 pt_end = pt_base + xen_start_info->nr_pt_frames;
1850
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001851 /* Zap identity mapping */
1852 init_level4_pgt[0] = __pgd(0);
1853
1854 /* Pre-constructed entries are in pfn, so convert to mfn */
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001855 /* L4[272] -> level3_ident_pgt
1856 * L4[511] -> level3_kernel_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001857 convert_pfn_mfn(init_level4_pgt);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001858
1859 /* L3_i[0] -> level2_ident_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001860 convert_pfn_mfn(level3_ident_pgt);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001861 /* L3_k[510] -> level2_kernel_pgt
 1862	 * L3_k[511] -> level2_fixmap_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001863 convert_pfn_mfn(level3_kernel_pgt);
1864
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001865 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001866 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1867 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1868
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001869 addr[0] = (unsigned long)pgd;
1870 addr[1] = (unsigned long)l3;
1871 addr[2] = (unsigned long)l2;
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001872	/* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
 1873	 * Both L4[272][0] and L4[511][511] have entries that point to the same
 1874	 * L2 (PMD) tables. Meaning that if you modify it in __va space
 1875	 * it will also be modified in the __ka space! (But if you just
 1876	 * modify the PMD table to point to other PTEs or none, then you
1877 * are OK - which is what cleanup_highmap does) */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001878 copy_page(level2_ident_pgt, l2);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001879 /* Graft it onto L4[511][511] */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001880 copy_page(level2_kernel_pgt, l2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001881
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001882 /* Get [511][510] and graft that in level2_fixmap_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001883 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1884 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001885 copy_page(level2_fixmap_pgt, l2);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001886 /* Note that we don't do anything with level1_fixmap_pgt which
1887 * we don't need. */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001888
1889 /* Make pagetable pieces RO */
1890 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1891 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1892 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1893 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001894 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001895 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1896 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1897
1898 /* Pin down new L4 */
1899 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1900 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1901
1902 /* Unpin Xen-provided one */
1903 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1904
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001905 /*
1906 * At this stage there can be no user pgd, and no page
1907 * structure to attach it to, so make sure we just set kernel
1908 * pgd.
1909 */
1910 xen_mc_batch();
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001911 __xen_write_cr3(true, __pa(init_level4_pgt));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001912 xen_mc_issue(PARAVIRT_LAZY_CPU);
1913
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001914	/* We can't easily rip out the L3 and L2, as the Xen pagetables are
 1915	 * laid out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
 1916	 * the initial domain. For guests using the toolstack, they are in
 1917	 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
1918 * rip out the [L4] (pgd), but for guests we shave off three pages.
1919 */
1920 for (i = 0; i < ARRAY_SIZE(addr); i++)
1921 check_pt_base(&pt_base, &pt_end, addr[i]);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001922
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001923	/* Reserve the (now three pages smaller) Xen pagetable that we are still using */
1924 memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001925 /* Revector the xen_start_info */
1926 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001927}
1928#else /* !CONFIG_X86_64 */
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001929static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1930static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1931
Daniel Kiper3f5089532011-05-12 17:19:53 -04001932static void __init xen_write_cr3_init(unsigned long cr3)
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001933{
1934 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
1935
1936 BUG_ON(read_cr3() != __pa(initial_page_table));
1937 BUG_ON(cr3 != __pa(swapper_pg_dir));
1938
1939 /*
1940 * We are switching to swapper_pg_dir for the first time (from
1941 * initial_page_table) and therefore need to mark that page
1942 * read-only and then pin it.
1943 *
1944 * Xen disallows sharing of kernel PMDs for PAE
1945 * guests. Therefore we must copy the kernel PMD from
1946 * initial_page_table into a new kernel PMD to be used in
1947 * swapper_pg_dir.
1948 */
1949 swapper_kernel_pmd =
1950 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001951 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001952 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
1953 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
1954 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
1955
1956 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1957 xen_write_cr3(cr3);
1958 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
1959
1960 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
1961 PFN_DOWN(__pa(initial_page_table)));
1962 set_page_prot(initial_page_table, PAGE_KERNEL);
1963 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
1964
1965 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1966}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001967
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04001968void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001969{
1970 pmd_t *kernel_pmd;
1971
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001972 initial_kernel_pmd =
1973 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Jeremy Fitzhardingef0991802010-08-26 16:16:28 -07001974
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001975 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
1976 xen_start_info->nr_pt_frames * PAGE_SIZE +
1977 512*1024);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001978
1979 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001980 copy_page(initial_kernel_pmd, kernel_pmd);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001981
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001982 xen_map_identity_early(initial_kernel_pmd, max_pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001983
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001984 copy_page(initial_page_table, pgd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001985 initial_page_table[KERNEL_PGD_BOUNDARY] =
1986 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001987
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001988 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
1989 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001990 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
1991
1992 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1993
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001994 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
1995 PFN_DOWN(__pa(initial_page_table)));
1996 xen_write_cr3(__pa(initial_page_table));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001997
Tejun Heo24aa0782011-07-12 11:16:06 +02001998 memblock_reserve(__pa(xen_start_info->pt_base),
Konrad Rzeszutek Wilkdc6821e2012-01-07 21:27:38 -05001999 xen_start_info->nr_pt_frames * PAGE_SIZE);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002000}
2001#endif /* CONFIG_X86_64 */
2002
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002003static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2004
Masami Hiramatsu3b3809a2009-04-09 10:55:33 -07002005static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002006{
2007 pte_t pte;
2008
2009 phys >>= PAGE_SHIFT;
2010
2011 switch (idx) {
2012 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2013#ifdef CONFIG_X86_F00F_BUG
2014 case FIX_F00F_IDT:
2015#endif
2016#ifdef CONFIG_X86_32
2017 case FIX_WP_TEST:
2018 case FIX_VDSO:
2019# ifdef CONFIG_HIGHMEM
2020 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2021# endif
2022#else
2023 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
Andy Lutomirski5d5791a2011-08-03 09:31:52 -04002024 case VVAR_PAGE:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002025#endif
Jeremy Fitzhardinge3ecb1b72009-03-07 23:48:41 -08002026 case FIX_TEXT_POKE0:
2027 case FIX_TEXT_POKE1:
2028 /* All local page mappings */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002029 pte = pfn_pte(phys, prot);
2030 break;
2031
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002032#ifdef CONFIG_X86_LOCAL_APIC
2033 case FIX_APIC_BASE: /* maps dummy local APIC */
2034 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2035 break;
2036#endif
2037
2038#ifdef CONFIG_X86_IO_APIC
2039 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2040 /*
2041 * We just don't map the IO APIC - all access is via
 2042		 * hypercalls. Map a dummy page in its place.
2043 */
Konrad Rzeszutek Wilk27abd142012-04-16 13:53:40 -04002044 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002045 break;
2046#endif
2047
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002048 case FIX_PARAVIRT_BOOTMAP:
2049 /* This is an MFN, but it isn't an IO mapping from the
2050 IO domain */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002051 pte = mfn_pte(phys, prot);
2052 break;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002053
2054 default:
2055 /* By default, set_fixmap is used for hardware mappings */
2056 pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
2057 break;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002058 }
2059
2060 __native_set_fixmap(idx, pte);
2061
2062#ifdef CONFIG_X86_64
2063 /* Replicate changes to map the vsyscall page into the user
2064 pagetable vsyscall mapping. */
Andy Lutomirski5d5791a2011-08-03 09:31:52 -04002065 if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
2066 idx == VVAR_PAGE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002067 unsigned long vaddr = __fix_to_virt(idx);
2068 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2069 }
2070#endif
2071}
2072
Daniel Kiper3f5089532011-05-12 17:19:53 -04002073static void __init xen_post_allocator_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002074{
2075 pv_mmu_ops.set_pte = xen_set_pte;
2076 pv_mmu_ops.set_pmd = xen_set_pmd;
2077 pv_mmu_ops.set_pud = xen_set_pud;
2078#if PAGETABLE_LEVELS == 4
2079 pv_mmu_ops.set_pgd = xen_set_pgd;
2080#endif
2081
2082 /* This will work as long as patching hasn't happened yet
2083 (which it hasn't) */
2084 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2085 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2086 pv_mmu_ops.release_pte = xen_release_pte;
2087 pv_mmu_ops.release_pmd = xen_release_pmd;
2088#if PAGETABLE_LEVELS == 4
2089 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2090 pv_mmu_ops.release_pud = xen_release_pud;
2091#endif
2092
2093#ifdef CONFIG_X86_64
2094 SetPagePinned(virt_to_page(level3_user_vsyscall));
2095#endif
2096 xen_mark_init_mm_pinned();
2097}
2098
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002099static void xen_leave_lazy_mmu(void)
2100{
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002101 preempt_disable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002102 xen_mc_flush();
2103 paravirt_leave_lazy_mmu();
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002104 preempt_enable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002105}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002106
Daniel Kiper3f5089532011-05-12 17:19:53 -04002107static const struct pv_mmu_ops xen_mmu_ops __initconst = {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002108 .read_cr2 = xen_read_cr2,
2109 .write_cr2 = xen_write_cr2,
2110
2111 .read_cr3 = xen_read_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002112#ifdef CONFIG_X86_32
2113 .write_cr3 = xen_write_cr3_init,
2114#else
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002115 .write_cr3 = xen_write_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002116#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002117
2118 .flush_tlb_user = xen_flush_tlb,
2119 .flush_tlb_kernel = xen_flush_tlb,
2120 .flush_tlb_single = xen_flush_tlb_single,
2121 .flush_tlb_others = xen_flush_tlb_others,
2122
2123 .pte_update = paravirt_nop,
2124 .pte_update_defer = paravirt_nop,
2125
2126 .pgd_alloc = xen_pgd_alloc,
2127 .pgd_free = xen_pgd_free,
2128
2129 .alloc_pte = xen_alloc_pte_init,
2130 .release_pte = xen_release_pte_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002131 .alloc_pmd = xen_alloc_pmd_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002132 .release_pmd = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002133
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002134 .set_pte = xen_set_pte_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002135 .set_pte_at = xen_set_pte_at,
2136 .set_pmd = xen_set_pmd_hyper,
2137
2138 .ptep_modify_prot_start = __ptep_modify_prot_start,
2139 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2140
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002141 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2142 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002143
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002144 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2145 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002146
2147#ifdef CONFIG_X86_PAE
2148 .set_pte_atomic = xen_set_pte_atomic,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002149 .pte_clear = xen_pte_clear,
2150 .pmd_clear = xen_pmd_clear,
2151#endif /* CONFIG_X86_PAE */
2152 .set_pud = xen_set_pud_hyper,
2153
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002154 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2155 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002156
2157#if PAGETABLE_LEVELS == 4
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002158 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2159 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002160 .set_pgd = xen_set_pgd_hyper,
2161
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002162 .alloc_pud = xen_alloc_pmd_init,
2163 .release_pud = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002164#endif /* PAGETABLE_LEVELS == 4 */
2165
2166 .activate_mm = xen_activate_mm,
2167 .dup_mmap = xen_dup_mmap,
2168 .exit_mmap = xen_exit_mmap,
2169
2170 .lazy_mode = {
2171 .enter = paravirt_enter_lazy_mmu,
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002172 .leave = xen_leave_lazy_mmu,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002173 },
2174
2175 .set_fixmap = xen_set_fixmap,
2176};
2177
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002178void __init xen_init_mmu_ops(void)
2179{
Stefano Stabellini279b7062011-04-14 15:49:41 +01002180 x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
Attilio Rao7737b212012-08-21 21:22:38 +01002181 x86_init.paging.pagetable_init = xen_pagetable_init;
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002182 pv_mmu_ops = xen_mmu_ops;
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -07002183
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002184 memset(dummy_mapping, 0xff, PAGE_SIZE);
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002185}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002186
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002187/* Protected by xen_reservation_lock. */
2188#define MAX_CONTIG_ORDER 9 /* 2MB */
2189static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2190
2191#define VOID_PTE (mfn_pte(0, __pgprot(0)))
2192static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2193 unsigned long *in_frames,
2194 unsigned long *out_frames)
2195{
2196 int i;
2197 struct multicall_space mcs;
2198
2199 xen_mc_batch();
2200 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2201 mcs = __xen_mc_entry(0);
2202
2203 if (in_frames)
2204 in_frames[i] = virt_to_mfn(vaddr);
2205
2206 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
Konrad Rzeszutek Wilk6eaa4122011-01-18 20:09:41 -05002207 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002208
2209 if (out_frames)
2210 out_frames[i] = virt_to_pfn(vaddr);
2211 }
2212 xen_mc_issue(0);
2213}
2214
2215/*
2216 * Update the pfn-to-mfn mappings for a virtual address range, either to
2217 * point to an array of mfns, or contiguously from a single starting
2218 * mfn.
2219 */
2220static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2221 unsigned long *mfns,
2222 unsigned long first_mfn)
2223{
2224 unsigned i, limit;
2225 unsigned long mfn;
2226
2227 xen_mc_batch();
2228
2229 limit = 1u << order;
2230 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2231 struct multicall_space mcs;
2232 unsigned flags;
2233
2234 mcs = __xen_mc_entry(0);
2235 if (mfns)
2236 mfn = mfns[i];
2237 else
2238 mfn = first_mfn + i;
2239
2240 if (i < (limit - 1))
2241 flags = 0;
2242 else {
2243 if (order == 0)
2244 flags = UVMF_INVLPG | UVMF_ALL;
2245 else
2246 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2247 }
2248
2249 MULTI_update_va_mapping(mcs.mc, vaddr,
2250 mfn_pte(mfn, PAGE_KERNEL), flags);
2251
2252 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2253 }
2254
2255 xen_mc_issue(0);
2256}
2257
2258/*
2259 * Perform the hypercall to exchange a region of our pfns to point to
2260 * memory with the required contiguous alignment. Takes the pfns as
2261 * input, and populates mfns as output.
2262 *
2263 * Returns a success code indicating whether the hypervisor was able to
2264 * satisfy the request or not.
2265 */
2266static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2267 unsigned long *pfns_in,
2268 unsigned long extents_out,
2269 unsigned int order_out,
2270 unsigned long *mfns_out,
2271 unsigned int address_bits)
2272{
2273 long rc;
2274 int success;
2275
2276 struct xen_memory_exchange exchange = {
2277 .in = {
2278 .nr_extents = extents_in,
2279 .extent_order = order_in,
2280 .extent_start = pfns_in,
2281 .domid = DOMID_SELF
2282 },
2283 .out = {
2284 .nr_extents = extents_out,
2285 .extent_order = order_out,
2286 .extent_start = mfns_out,
2287 .address_bits = address_bits,
2288 .domid = DOMID_SELF
2289 }
2290 };
2291
2292 BUG_ON(extents_in << order_in != extents_out << order_out);
2293
2294 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2295 success = (exchange.nr_exchanged == extents_in);
2296
2297 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2298 BUG_ON(success && (rc != 0));
2299
2300 return success;
2301}
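/*
 * Illustrative sketch, not part of the upstream file: the invariant
 * BUG_ON() above in concrete numbers.  Both sides of an exchange must
 * cover the same number of frames, e.g. trading 8 order-0 extents for
 * 1 order-3 extent: 8 << 0 == 1 << 3.
 */
static void __maybe_unused exchange_invariant_example(void)
{
	unsigned long extents_in = 8, extents_out = 1;
	unsigned int order_in = 0, order_out = 3;

	/* Same check xen_exchange_memory() applies to its arguments. */
	BUG_ON(extents_in << order_in != extents_out << order_out);
}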
2302
2303int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
2304 unsigned int address_bits)
2305{
2306 unsigned long *in_frames = discontig_frames, out_frame;
2307 unsigned long flags;
2308 int success;
2309
2310 /*
2311 * Currently an auto-translated guest will not perform I/O, nor will
2312 * it require PAE page directories below 4GB. Therefore any calls to
2313 * this function are redundant and can be ignored.
2314 */
2315
2316 if (xen_feature(XENFEAT_auto_translated_physmap))
2317 return 0;
2318
2319 if (unlikely(order > MAX_CONTIG_ORDER))
2320 return -ENOMEM;
2321
2322 memset((void *) vstart, 0, PAGE_SIZE << order);
2323
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002324 spin_lock_irqsave(&xen_reservation_lock, flags);
2325
2326 /* 1. Zap current PTEs, remembering MFNs. */
2327 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2328
2329 /* 2. Get a new contiguous memory extent. */
2330 out_frame = virt_to_pfn(vstart);
2331 success = xen_exchange_memory(1UL << order, 0, in_frames,
2332 1, order, &out_frame,
2333 address_bits);
2334
2335 /* 3. Map the new extent in place of old pages. */
2336 if (success)
2337 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2338 else
2339 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2340
2341 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2342
2343 return success ? 0 : -ENOMEM;
2344}
2345EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2346
2347void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
2348{
2349 unsigned long *out_frames = discontig_frames, in_frame;
2350 unsigned long flags;
2351 int success;
2352
2353 if (xen_feature(XENFEAT_auto_translated_physmap))
2354 return;
2355
2356 if (unlikely(order > MAX_CONTIG_ORDER))
2357 return;
2358
2359 memset((void *) vstart, 0, PAGE_SIZE << order);
2360
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002361 spin_lock_irqsave(&xen_reservation_lock, flags);
2362
2363 /* 1. Find start MFN of contiguous extent. */
2364 in_frame = virt_to_mfn(vstart);
2365
2366 /* 2. Zap current PTEs. */
2367 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2368
2369 /* 3. Do the exchange for non-contiguous MFNs. */
2370 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2371 0, out_frames, 0);
2372
2373 /* 4. Map new pages in place of old pages. */
2374 if (success)
2375 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2376 else
2377 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2378
2379 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2380}
2381EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
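/*
 * Illustrative sketch, not part of the upstream file: a hypothetical
 * caller that makes an order-2 (four page) kernel allocation machine-
 * contiguous below 4GB, e.g. for a simple DMA descriptor ring.  The
 * function name and the surrounding error handling are stand-ins, not
 * a real in-tree API.
 */
static int __maybe_unused example_alloc_dma_ring(void)
{
	unsigned long vstart = __get_free_pages(GFP_KERNEL, 2);
	int rc;

	if (!vstart)
		return -ENOMEM;

	/* Exchange the backing frames for one contiguous extent < 4GB. */
	rc = xen_create_contiguous_region(vstart, 2, 32);
	if (rc) {
		free_pages(vstart, 2);
		return rc;
	}

	/* ... program the device with virt_to_machine(vstart) ... */

	/* Undo the exchange before handing the pages back. */
	xen_destroy_contiguous_region(vstart, 2);
	free_pages(vstart, 2);
	return 0;
}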
2382
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002383#ifdef CONFIG_XEN_PVHVM
Stefano Stabellini59151002010-06-17 14:22:52 +01002384static void xen_hvm_exit_mmap(struct mm_struct *mm)
2385{
2386 struct xen_hvm_pagetable_dying a;
2387 int rc;
2388
2389 a.domid = DOMID_SELF;
2390 a.gpa = __pa(mm->pgd);
2391 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2392 WARN_ON_ONCE(rc < 0);
2393}
2394
2395static int is_pagetable_dying_supported(void)
2396{
2397 struct xen_hvm_pagetable_dying a;
2398 int rc = 0;
2399
2400 a.domid = DOMID_SELF;
2401 a.gpa = 0x00;
2402 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2403 if (rc < 0) {
2404 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2405 return 0;
2406 }
2407 return 1;
2408}
2409
2410void __init xen_hvm_init_mmu_ops(void)
2411{
2412 if (is_pagetable_dying_supported())
2413 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
2414}
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002415#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002416
Ian Campbellde1ef202009-05-21 10:09:46 +01002417#define REMAP_BATCH_SIZE 16
2418
2419struct remap_data {
2420 unsigned long mfn;
2421 pgprot_t prot;
2422 struct mmu_update *mmu_update;
2423};
2424
2425static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2426 unsigned long addr, void *data)
2427{
2428 struct remap_data *rmd = data;
2429 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
2430
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -08002431 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
Ian Campbellde1ef202009-05-21 10:09:46 +01002432 rmd->mmu_update->val = pte_val_ma(pte);
2433 rmd->mmu_update++;
2434
2435 return 0;
2436}
2437
2438int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2439 unsigned long addr,
2440 unsigned long mfn, int nr,
2441 pgprot_t prot, unsigned domid)
2442{
2443 struct remap_data rmd;
2444 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2445 int batch;
2446 unsigned long range;
2447 int err = 0;
2448
Stefano Stabellini1a1d4332012-08-22 17:20:16 +01002449 if (xen_feature(XENFEAT_auto_translated_physmap))
2450 return -EINVAL;
2451
Ian Campbellde1ef202009-05-21 10:09:46 +01002452 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
2453
Stefano Stabellinie060e7af2010-11-11 12:37:43 -08002454 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
2455 (VM_PFNMAP | VM_RESERVED | VM_IO)));
Ian Campbellde1ef202009-05-21 10:09:46 +01002456
2457 rmd.mfn = mfn;
2458 rmd.prot = prot;
2459
2460 while (nr) {
2461 batch = min(REMAP_BATCH_SIZE, nr);
2462 range = (unsigned long)batch << PAGE_SHIFT;
2463
2464 rmd.mmu_update = mmu_update;
2465 err = apply_to_page_range(vma->vm_mm, addr, range,
2466 remap_area_mfn_pte_fn, &rmd);
2467 if (err)
2468 goto out;
2469
David Vrabel69870a82012-08-30 13:58:11 +01002470 err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
2471 if (err < 0)
Ian Campbellde1ef202009-05-21 10:09:46 +01002472 goto out;
2473
2474 nr -= batch;
2475 addr += range;
2476 }
2477
2478 err = 0;
2479out:
2480
2481 flush_tlb_all();
2482
2483 return err;
2484}
2485EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
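/*
 * Illustrative sketch, not part of the upstream file: how a
 * privcmd-style driver might call xen_remap_domain_mfn_range() from
 * its mmap path.  The request structure and both names below are
 * hypothetical stand-ins.
 */
struct example_mmap_request {
	unsigned long va;	/* page-aligned user virtual address */
	unsigned long mfn;	/* first machine frame to map */
	int npages;		/* number of pages to map */
	domid_t dom;		/* domain that owns the frames */
};

static int __maybe_unused example_mmap_mfns(struct vm_area_struct *vma,
					    const struct example_mmap_request *msg)
{
	/* The vma must already be VM_PFNMAP | VM_RESERVED | VM_IO. */
	return xen_remap_domain_mfn_range(vma, msg->va, msg->mfn,
					  msg->npages, vma->vm_page_prot,
					  msg->dom);
}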