blob: 5b2cb54425cec331676b3ddac785c43efb699ef2 [file] [log] [blame]
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001/*
2 * Xen mmu operations
3 *
4 * This file contains the various mmu fetch and update operations.
5 * The most important job they must perform is the mapping between the
6 * domain's pfn and the overall machine mfns.
7 *
8 * Xen allows guests to directly update the pagetable, in a controlled
9 * fashion. In other words, the guest modifies the same pagetable
10 * that the CPU actually uses, which eliminates the overhead of having
11 * a separate shadow pagetable.
12 *
13 * In order to allow this, it falls on the guest domain to map its
14 * notion of a "physical" pfn - which is just a domain-local linear
15 * address - into a real "machine address" which the CPU's MMU can
16 * use.
17 *
18 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19 * inserted directly into the pagetable. When creating a new
20 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
21 * when reading the content back with __(pgd|pmd|pte)_val, it converts
22 * the mfn back into a pfn.
23 *
24 * The other constraint is that all pages which make up a pagetable
25 * must be mapped read-only in the guest. This prevents uncontrolled
26 * guest updates to the pagetable. Xen strictly enforces this, and
27 * will disallow any pagetable update which will end up mapping a
28 * pagetable page RW, and will disallow using any writable page as a
29 * pagetable.
30 *
31 * Naively, when loading %cr3 with the base of a new pagetable, Xen
32 * would need to validate the whole pagetable before going on.
33 * Naturally, this is quite slow. The solution is to "pin" a
34 * pagetable, which enforces all the constraints on the pagetable even
35 * when it is not actively in use. This menas that Xen can be assured
36 * that it is still valid when you do load it into %cr3, and doesn't
37 * need to revalidate it.
38 *
39 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
40 */
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -070041#include <linux/sched.h>
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070042#include <linux/highmem.h>
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -070043#include <linux/debugfs.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070044#include <linux/bug.h>
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -070045#include <linux/vmalloc.h>
Randy Dunlap44408ad2009-05-12 13:31:40 -070046#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090047#include <linux/gfp.h>
Yinghai Lua9ce6bc2010-08-25 13:39:17 -070048#include <linux/memblock.h>
Konrad Rzeszutek Wilk2222e712010-12-22 08:57:30 -050049#include <linux/seq_file.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070050
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -080051#include <trace/events/xen.h>
52
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070053#include <asm/pgtable.h>
54#include <asm/tlbflush.h>
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -070055#include <asm/fixmap.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070056#include <asm/mmu_context.h>
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080057#include <asm/setup.h>
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070058#include <asm/paravirt.h>
Alex Nixon7347b402010-02-19 13:31:06 -050059#include <asm/e820.h>
Jeremy Fitzhardingecbcd79c2008-07-08 15:06:27 -070060#include <asm/linkage.h>
Alex Nixon08bbc9d2009-02-09 12:05:46 -080061#include <asm/page.h>
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -070062#include <asm/init.h>
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -070063#include <asm/pat.h>
Andrew Jones900cba82009-12-18 10:31:31 +010064#include <asm/smp.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070065
66#include <asm/xen/hypercall.h>
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070067#include <asm/xen/hypervisor.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070068
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -080069#include <xen/xen.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070070#include <xen/page.h>
71#include <xen/interface/xen.h>
Stefano Stabellini59151002010-06-17 14:22:52 +010072#include <xen/interface/hvm/hvm_op.h>
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080073#include <xen/interface/version.h>
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -080074#include <xen/interface/memory.h>
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080075#include <xen/hvc-console.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070076
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070077#include "multicalls.h"
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070078#include "mmu.h"
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -070079#include "debugfs.h"
80
Alex Nixon19001c82009-02-09 12:05:46 -080081/*
82 * Protects atomic reservation decrease/increase against concurrent increases.
Daniel Kiper06f521d2011-03-08 22:45:46 +010083 * Also protects non-atomic updates of current_pages and balloon lists.
Alex Nixon19001c82009-02-09 12:05:46 -080084 */
85DEFINE_SPINLOCK(xen_reservation_lock);
86
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -040087#ifdef CONFIG_X86_32
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080088/*
89 * Identity map, in addition to plain kernel map. This needs to be
90 * large enough to allocate page table pages to allocate the rest.
91 * Each page can map 2MB.
92 */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -070093#define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
94static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -040095#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080096#ifdef CONFIG_X86_64
97/* l3 pud for userspace vsyscall mapping */
98static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
99#endif /* CONFIG_X86_64 */
100
101/*
102 * Note about cr3 (pagetable base) values:
103 *
104 * xen_cr3 contains the current logical cr3 value; it contains the
105 * last set cr3. This may not be the current effective cr3, because
106 * its update may be being lazily deferred. However, a vcpu looking
107 * at its own cr3 can use this value knowing that it everything will
108 * be self-consistent.
109 *
110 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
111 * hypercall to set the vcpu cr3 is complete (so it may be a little
112 * out of date, but it will never be set early). If one vcpu is
113 * looking at another vcpu's cr3 value, it should use this variable.
114 */
115DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
116DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
117
118
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700119/*
120 * Just beyond the highest usermode address. STACK_TOP_MAX has a
121 * redzone above it, so round it up to a PGD boundary.
122 */
123#define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
124
Jeremy Fitzhardinge9976b392009-02-27 09:19:26 -0800125unsigned long arbitrary_virt_to_mfn(void *vaddr)
126{
127 xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
128
129 return PFN_DOWN(maddr.maddr);
130}
131
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700132xmaddr_t arbitrary_virt_to_machine(void *vaddr)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700133{
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700134 unsigned long address = (unsigned long)vaddr;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +0100135 unsigned int level;
Chris Lalancette9f32d212008-10-23 17:40:25 -0700136 pte_t *pte;
137 unsigned offset;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700138
Chris Lalancette9f32d212008-10-23 17:40:25 -0700139 /*
140 * if the PFN is in the linear mapped vaddr range, we can just use
141 * the (quick) virt_to_machine() p2m lookup
142 */
143 if (virt_addr_valid(vaddr))
144 return virt_to_machine(vaddr);
145
146 /* otherwise we have to do a (slower) full page-table walk */
147
148 pte = lookup_address(address, &level);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700149 BUG_ON(pte == NULL);
Chris Lalancette9f32d212008-10-23 17:40:25 -0700150 offset = address & ~PAGE_MASK;
Jeremy Fitzhardingeebd879e2008-07-08 15:06:54 -0700151 return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700152}
Stephen Rothwellde23be52011-01-15 10:36:26 +1100153EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700154
155void make_lowmem_page_readonly(void *vaddr)
156{
157 pte_t *pte, ptev;
158 unsigned long address = (unsigned long)vaddr;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +0100159 unsigned int level;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700160
Ingo Molnarf0646e42008-01-30 13:33:43 +0100161 pte = lookup_address(address, &level);
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -0700162 if (pte == NULL)
163 return; /* vaddr missing */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700164
165 ptev = pte_wrprotect(*pte);
166
167 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
168 BUG();
169}
170
171void make_lowmem_page_readwrite(void *vaddr)
172{
173 pte_t *pte, ptev;
174 unsigned long address = (unsigned long)vaddr;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +0100175 unsigned int level;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700176
Ingo Molnarf0646e42008-01-30 13:33:43 +0100177 pte = lookup_address(address, &level);
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -0700178 if (pte == NULL)
179 return; /* vaddr missing */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700180
181 ptev = pte_mkwrite(*pte);
182
183 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
184 BUG();
185}
186
187
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700188static bool xen_page_pinned(void *ptr)
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100189{
190 struct page *page = virt_to_page(ptr);
191
192 return PagePinned(page);
193}
194
Jeremy Fitzhardingeeba3ff82009-02-09 12:05:49 -0800195void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800196{
197 struct multicall_space mcs;
198 struct mmu_update *u;
199
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800200 trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
201
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800202 mcs = xen_mc_entry(sizeof(*u));
203 u = mcs.args;
204
205 /* ptep might be kmapped when using 32-bit HIGHPTE */
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -0800206 u->ptr = virt_to_machine(ptep).maddr;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800207 u->val = pte_val_ma(pteval);
208
Jeremy Fitzhardingeeba3ff82009-02-09 12:05:49 -0800209 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800210
211 xen_mc_issue(PARAVIRT_LAZY_MMU);
212}
Jeremy Fitzhardingeeba3ff82009-02-09 12:05:49 -0800213EXPORT_SYMBOL_GPL(xen_set_domain_pte);
214
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700215static void xen_extend_mmu_update(const struct mmu_update *update)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700216{
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700217 struct multicall_space mcs;
218 struct mmu_update *u;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700219
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700220 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
221
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700222 if (mcs.mc != NULL) {
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700223 mcs.mc->args[1]++;
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700224 } else {
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700225 mcs = __xen_mc_entry(sizeof(*u));
226 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
227 }
228
229 u = mcs.args;
230 *u = *update;
231}
232
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -0800233static void xen_extend_mmuext_op(const struct mmuext_op *op)
234{
235 struct multicall_space mcs;
236 struct mmuext_op *u;
237
238 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
239
240 if (mcs.mc != NULL) {
241 mcs.mc->args[1]++;
242 } else {
243 mcs = __xen_mc_entry(sizeof(*u));
244 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
245 }
246
247 u = mcs.args;
248 *u = *op;
249}
250
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800251static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700252{
253 struct mmu_update u;
254
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700255 preempt_disable();
256
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700257 xen_mc_batch();
258
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700259 /* ptr may be ioremapped for 64-bit pagetable setup */
260 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700261 u.val = pmd_val_ma(val);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700262 xen_extend_mmu_update(&u);
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700263
264 xen_mc_issue(PARAVIRT_LAZY_MMU);
265
266 preempt_enable();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700267}
268
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800269static void xen_set_pmd(pmd_t *ptr, pmd_t val)
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100270{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800271 trace_xen_mmu_set_pmd(ptr, val);
272
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100273 /* If page is not pinned, we can just update the entry
274 directly */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700275 if (!xen_page_pinned(ptr)) {
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100276 *ptr = val;
277 return;
278 }
279
280 xen_set_pmd_hyper(ptr, val);
281}
282
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700283/*
284 * Associate a virtual page frame with a given physical page frame
285 * and protection flags for that frame.
286 */
287void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
288{
Jeremy Fitzhardinge836fe2f2008-07-08 15:06:58 -0700289 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700290}
291
Jeremy Fitzhardinge4a35c132010-12-01 15:30:41 -0800292static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
293{
294 struct mmu_update u;
295
296 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
297 return false;
298
299 xen_mc_batch();
300
301 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
302 u.val = pte_val_ma(pteval);
303 xen_extend_mmu_update(&u);
304
305 xen_mc_issue(PARAVIRT_LAZY_MMU);
306
307 return true;
308}
309
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800310static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
Jeremy Fitzhardinge4a35c132010-12-01 15:30:41 -0800311{
Jeremy Fitzhardinge4a35c132010-12-01 15:30:41 -0800312 if (!xen_batched_set_pte(ptep, pteval))
313 native_set_pte(ptep, pteval);
314}
315
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800316static void xen_set_pte(pte_t *ptep, pte_t pteval)
317{
318 trace_xen_mmu_set_pte(ptep, pteval);
319 __xen_set_pte(ptep, pteval);
320}
321
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800322static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700323 pte_t *ptep, pte_t pteval)
324{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800325 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
326 __xen_set_pte(ptep, pteval);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700327}
328
Tejf63c2f22008-12-16 11:56:06 -0800329pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
330 unsigned long addr, pte_t *ptep)
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700331{
332 /* Just return the pte as-is. We preserve the bits on commit */
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800333 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700334 return *ptep;
335}
336
337void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
338 pte_t *ptep, pte_t pte)
339{
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700340 struct mmu_update u;
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700341
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800342 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700343 xen_mc_batch();
344
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -0800345 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700346 u.val = pte_val_ma(pte);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700347 xen_extend_mmu_update(&u);
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700348
349 xen_mc_issue(PARAVIRT_LAZY_MMU);
350}
351
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700352/* Assume pteval_t is equivalent to all the other *val_t types. */
353static pteval_t pte_mfn_to_pfn(pteval_t val)
354{
355 if (val & _PAGE_PRESENT) {
Jeremy Fitzhardinge59438c92008-07-21 22:59:42 -0700356 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
Konrad Rzeszutek Wilkb7e5ffe2012-05-03 16:14:14 -0400357 unsigned long pfn = mfn_to_pfn(mfn);
358
Jeremy Fitzhardinge77be1fa2008-07-21 22:59:56 -0700359 pteval_t flags = val & PTE_FLAGS_MASK;
Konrad Rzeszutek Wilkb7e5ffe2012-05-03 16:14:14 -0400360 if (unlikely(pfn == ~0))
361 val = flags & ~_PAGE_PRESENT;
362 else
363 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700364 }
365
366 return val;
367}
368
369static pteval_t pte_pfn_to_mfn(pteval_t val)
370{
371 if (val & _PAGE_PRESENT) {
Jeremy Fitzhardinge59438c92008-07-21 22:59:42 -0700372 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
Jeremy Fitzhardinge77be1fa2008-07-21 22:59:56 -0700373 pteval_t flags = val & PTE_FLAGS_MASK;
Konrad Rzeszutek Wilkfb389232011-01-05 15:46:31 -0500374 unsigned long mfn;
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700375
Konrad Rzeszutek Wilkfb389232011-01-05 15:46:31 -0500376 if (!xen_feature(XENFEAT_auto_translated_physmap))
377 mfn = get_phys_to_machine(pfn);
378 else
379 mfn = pfn;
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700380 /*
381 * If there's no mfn for the pfn, then just create an
382 * empty non-present pte. Unfortunately this loses
383 * information about the original pfn, so
384 * pte_mfn_to_pfn is asymmetric.
385 */
386 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
387 mfn = 0;
388 flags = 0;
Konrad Rzeszutek Wilkfb389232011-01-05 15:46:31 -0500389 } else {
390 /*
391 * Paramount to do this test _after_ the
392 * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY &
393 * IDENTITY_FRAME_BIT resolves to true.
394 */
395 mfn &= ~FOREIGN_FRAME_BIT;
396 if (mfn & IDENTITY_FRAME_BIT) {
397 mfn &= ~IDENTITY_FRAME_BIT;
398 flags |= _PAGE_IOMAP;
399 }
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700400 }
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700401 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700402 }
403
404 return val;
405}
406
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800407static pteval_t iomap_pte(pteval_t val)
408{
409 if (val & _PAGE_PRESENT) {
410 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
411 pteval_t flags = val & PTE_FLAGS_MASK;
412
413 /* We assume the pte frame number is a MFN, so
414 just use it as-is. */
415 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
416 }
417
418 return val;
419}
420
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800421static pteval_t xen_pte_val(pte_t pte)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700422{
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700423 pteval_t pteval = pte.pte;
Konrad Rzeszutek Wilk8eaffa62012-02-10 09:16:27 -0500424#if 0
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700425 /* If this is a WC pte, convert back from Xen WC to Linux WC */
426 if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
427 WARN_ON(!pat_enabled);
428 pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
429 }
Konrad Rzeszutek Wilk8eaffa62012-02-10 09:16:27 -0500430#endif
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700431 if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
432 return pteval;
433
434 return pte_mfn_to_pfn(pteval);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700435}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800436PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700437
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800438static pgdval_t xen_pgd_val(pgd_t pgd)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700439{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700440 return pte_mfn_to_pfn(pgd.pgd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700441}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800442PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700443
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700444/*
445 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
446 * are reserved for now, to correspond to the Intel-reserved PAT
447 * types.
448 *
449 * We expect Linux's PAT set as follows:
450 *
451 * Idx PTE flags Linux Xen Default
452 * 0 WB WB WB
453 * 1 PWT WC WT WT
454 * 2 PCD UC- UC- UC-
455 * 3 PCD PWT UC UC UC
456 * 4 PAT WB WC WB
457 * 5 PAT PWT WC WP WT
458 * 6 PAT PCD UC- UC UC-
459 * 7 PAT PCD PWT UC UC UC
460 */
461
462void xen_set_pat(u64 pat)
463{
464 /* We expect Linux to use a PAT setting of
465 * UC UC- WC WB (ignoring the PAT flag) */
466 WARN_ON(pat != 0x0007010600070106ull);
467}
468
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800469static pte_t xen_make_pte(pteval_t pte)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700470{
Alex Nixon7347b402010-02-19 13:31:06 -0500471 phys_addr_t addr = (pte & PTE_PFN_MASK);
Konrad Rzeszutek Wilk8eaffa62012-02-10 09:16:27 -0500472#if 0
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700473 /* If Linux is trying to set a WC pte, then map to the Xen WC.
474 * If _PAGE_PAT is set, then it probably means it is really
475 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
476 * things work out OK...
477 *
478 * (We should never see kernel mappings with _PAGE_PSE set,
479 * but we could see hugetlbfs mappings, I think.).
480 */
481 if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
482 if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
483 pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
484 }
Konrad Rzeszutek Wilk8eaffa62012-02-10 09:16:27 -0500485#endif
Alex Nixon7347b402010-02-19 13:31:06 -0500486 /*
487 * Unprivileged domains are allowed to do IOMAPpings for
488 * PCI passthrough, but not map ISA space. The ISA
489 * mappings are just dummy local mappings to keep other
490 * parts of the kernel happy.
491 */
492 if (unlikely(pte & _PAGE_IOMAP) &&
493 (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800494 pte = iomap_pte(pte);
Alex Nixon7347b402010-02-19 13:31:06 -0500495 } else {
496 pte &= ~_PAGE_IOMAP;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800497 pte = pte_pfn_to_mfn(pte);
Alex Nixon7347b402010-02-19 13:31:06 -0500498 }
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800499
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700500 return native_make_pte(pte);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700501}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800502PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700503
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800504static pgd_t xen_make_pgd(pgdval_t pgd)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700505{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700506 pgd = pte_pfn_to_mfn(pgd);
507 return native_make_pgd(pgd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700508}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800509PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700510
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800511static pmdval_t xen_pmd_val(pmd_t pmd)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700512{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700513 return pte_mfn_to_pfn(pmd.pmd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700514}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800515PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
Jeremy Fitzhardinge28499142008-05-09 12:05:57 +0100516
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800517static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700518{
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700519 struct mmu_update u;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700520
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700521 preempt_disable();
522
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700523 xen_mc_batch();
524
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700525 /* ptr may be ioremapped for 64-bit pagetable setup */
526 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700527 u.val = pud_val_ma(val);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700528 xen_extend_mmu_update(&u);
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700529
530 xen_mc_issue(PARAVIRT_LAZY_MMU);
531
532 preempt_enable();
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700533}
534
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800535static void xen_set_pud(pud_t *ptr, pud_t val)
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100536{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800537 trace_xen_mmu_set_pud(ptr, val);
538
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100539 /* If page is not pinned, we can just update the entry
540 directly */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700541 if (!xen_page_pinned(ptr)) {
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100542 *ptr = val;
543 return;
544 }
545
546 xen_set_pud_hyper(ptr, val);
547}
548
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700549#ifdef CONFIG_X86_PAE
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800550static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700551{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800552 trace_xen_mmu_set_pte_atomic(ptep, pte);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700553 set_64bit((u64 *)ptep, native_pte_val(pte));
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700554}
555
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800556static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700557{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800558 trace_xen_mmu_pte_clear(mm, addr, ptep);
Jeremy Fitzhardinge4a35c132010-12-01 15:30:41 -0800559 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
560 native_pte_clear(mm, addr, ptep);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700561}
562
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800563static void xen_pmd_clear(pmd_t *pmdp)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700564{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800565 trace_xen_mmu_pmd_clear(pmdp);
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100566 set_pmd(pmdp, __pmd(0));
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700567}
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700568#endif /* CONFIG_X86_PAE */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700569
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800570static pmd_t xen_make_pmd(pmdval_t pmd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700571{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700572 pmd = pte_pfn_to_mfn(pmd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700573 return native_make_pmd(pmd);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700574}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800575PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700576
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700577#if PAGETABLE_LEVELS == 4
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800578static pudval_t xen_pud_val(pud_t pud)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700579{
580 return pte_mfn_to_pfn(pud.pud);
581}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800582PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700583
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800584static pud_t xen_make_pud(pudval_t pud)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700585{
586 pud = pte_pfn_to_mfn(pud);
587
588 return native_make_pud(pud);
589}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800590PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700591
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800592static pgd_t *xen_get_user_pgd(pgd_t *pgd)
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700593{
594 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
595 unsigned offset = pgd - pgd_page;
596 pgd_t *user_ptr = NULL;
597
598 if (offset < pgd_index(USER_LIMIT)) {
599 struct page *page = virt_to_page(pgd_page);
600 user_ptr = (pgd_t *)page->private;
601 if (user_ptr)
602 user_ptr += offset;
603 }
604
605 return user_ptr;
606}
607
608static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700609{
610 struct mmu_update u;
611
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700612 u.ptr = virt_to_machine(ptr).maddr;
613 u.val = pgd_val_ma(val);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700614 xen_extend_mmu_update(&u);
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700615}
616
617/*
618 * Raw hypercall-based set_pgd, intended for in early boot before
619 * there's a page structure. This implies:
620 * 1. The only existing pagetable is the kernel's
621 * 2. It is always pinned
622 * 3. It has no user pagetable attached to it
623 */
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800624static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700625{
626 preempt_disable();
627
628 xen_mc_batch();
629
630 __xen_set_pgd_hyper(ptr, val);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700631
632 xen_mc_issue(PARAVIRT_LAZY_MMU);
633
634 preempt_enable();
635}
636
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800637static void xen_set_pgd(pgd_t *ptr, pgd_t val)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700638{
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700639 pgd_t *user_ptr = xen_get_user_pgd(ptr);
640
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800641 trace_xen_mmu_set_pgd(ptr, user_ptr, val);
642
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700643 /* If page is not pinned, we can just update the entry
644 directly */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700645 if (!xen_page_pinned(ptr)) {
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700646 *ptr = val;
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700647 if (user_ptr) {
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700648 WARN_ON(xen_page_pinned(user_ptr));
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700649 *user_ptr = val;
650 }
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700651 return;
652 }
653
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700654 /* If it's pinned, then we can at least batch the kernel and
655 user updates together. */
656 xen_mc_batch();
657
658 __xen_set_pgd_hyper(ptr, val);
659 if (user_ptr)
660 __xen_set_pgd_hyper(user_ptr, val);
661
662 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700663}
664#endif /* PAGETABLE_LEVELS == 4 */
665
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700666/*
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700667 * (Yet another) pagetable walker. This one is intended for pinning a
668 * pagetable. This means that it walks a pagetable and calls the
669 * callback function on each page it finds making up the page table,
670 * at every level. It walks the entire pagetable, but it only bothers
671 * pinning pte pages which are below limit. In the normal case this
672 * will be STACK_TOP_MAX, but at boot we need to pin up to
673 * FIXADDR_TOP.
674 *
675 * For 32-bit the important bit is that we don't pin beyond there,
676 * because then we start getting into Xen's ptes.
677 *
678 * For 64-bit, we must skip the Xen hole in the middle of the address
679 * space, just after the big x86-64 virtual hole.
680 */
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000681static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
682 int (*func)(struct mm_struct *mm, struct page *,
683 enum pt_level),
684 unsigned long limit)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700685{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700686 int flush = 0;
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700687 unsigned hole_low, hole_high;
688 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
689 unsigned pgdidx, pudidx, pmdidx;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700690
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700691 /* The limit is the last byte to be touched */
692 limit--;
693 BUG_ON(limit >= FIXADDR_TOP);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700694
695 if (xen_feature(XENFEAT_auto_translated_physmap))
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700696 return 0;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700697
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700698 /*
699 * 64-bit has a great big hole in the middle of the address
700 * space, which contains the Xen mappings. On 32-bit these
701 * will end up making a zero-sized hole and so is a no-op.
702 */
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700703 hole_low = pgd_index(USER_LIMIT);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700704 hole_high = pgd_index(PAGE_OFFSET);
705
706 pgdidx_limit = pgd_index(limit);
707#if PTRS_PER_PUD > 1
708 pudidx_limit = pud_index(limit);
709#else
710 pudidx_limit = 0;
711#endif
712#if PTRS_PER_PMD > 1
713 pmdidx_limit = pmd_index(limit);
714#else
715 pmdidx_limit = 0;
716#endif
717
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700718 for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700719 pud_t *pud;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700720
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700721 if (pgdidx >= hole_low && pgdidx < hole_high)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700722 continue;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700723
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700724 if (!pgd_val(pgd[pgdidx]))
725 continue;
726
727 pud = pud_offset(&pgd[pgdidx], 0);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700728
729 if (PTRS_PER_PUD > 1) /* not folded */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700730 flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700731
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700732 for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700733 pmd_t *pmd;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700734
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700735 if (pgdidx == pgdidx_limit &&
736 pudidx > pudidx_limit)
737 goto out;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700738
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700739 if (pud_none(pud[pudidx]))
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700740 continue;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700741
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700742 pmd = pmd_offset(&pud[pudidx], 0);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700743
744 if (PTRS_PER_PMD > 1) /* not folded */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700745 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700746
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700747 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
748 struct page *pte;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700749
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700750 if (pgdidx == pgdidx_limit &&
751 pudidx == pudidx_limit &&
752 pmdidx > pmdidx_limit)
753 goto out;
754
755 if (pmd_none(pmd[pmdidx]))
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700756 continue;
757
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700758 pte = pmd_page(pmd[pmdidx]);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700759 flush |= (*func)(mm, pte, PT_PTE);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700760 }
761 }
762 }
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700763
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700764out:
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700765 /* Do the top level last, so that the callbacks can use it as
766 a cue to do final things like tlb flushes. */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700767 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700768
769 return flush;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700770}
771
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000772static int xen_pgd_walk(struct mm_struct *mm,
773 int (*func)(struct mm_struct *mm, struct page *,
774 enum pt_level),
775 unsigned long limit)
776{
777 return __xen_pgd_walk(mm, mm->pgd, func, limit);
778}
779
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700780/* If we're using split pte locks, then take the page's lock and
781 return a pointer to it. Otherwise return NULL. */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700782static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700783{
784 spinlock_t *ptl = NULL;
785
Jeremy Fitzhardingef7d0b922008-09-09 15:43:22 -0700786#if USE_SPLIT_PTLOCKS
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700787 ptl = __pte_lockptr(page);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700788 spin_lock_nest_lock(ptl, &mm->page_table_lock);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700789#endif
790
791 return ptl;
792}
793
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700794static void xen_pte_unlock(void *v)
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700795{
796 spinlock_t *ptl = v;
797 spin_unlock(ptl);
798}
799
800static void xen_do_pin(unsigned level, unsigned long pfn)
801{
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -0800802 struct mmuext_op op;
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700803
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -0800804 op.cmd = level;
805 op.arg1.mfn = pfn_to_mfn(pfn);
806
807 xen_extend_mmuext_op(&op);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700808}
809
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700810static int xen_pin_page(struct mm_struct *mm, struct page *page,
811 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700812{
Christoph Lameterd60cd462008-04-28 02:12:51 -0700813 unsigned pgfl = TestSetPagePinned(page);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700814 int flush;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700815
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700816 if (pgfl)
817 flush = 0; /* already pinned */
818 else if (PageHighMem(page))
819 /* kmaps need flushing if we found an unpinned
820 highpage */
821 flush = 1;
822 else {
823 void *pt = lowmem_page_address(page);
824 unsigned long pfn = page_to_pfn(page);
825 struct multicall_space mcs = __xen_mc_entry(0);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700826 spinlock_t *ptl;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700827
828 flush = 0;
829
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700830 /*
831 * We need to hold the pagetable lock between the time
832 * we make the pagetable RO and when we actually pin
833 * it. If we don't, then other users may come in and
834 * attempt to update the pagetable by writing it,
835 * which will fail because the memory is RO but not
836 * pinned, so Xen won't do the trap'n'emulate.
837 *
838 * If we're using split pte locks, we can't hold the
839 * entire pagetable's worth of locks during the
840 * traverse, because we may wrap the preempt count (8
841 * bits). The solution is to mark RO and pin each PTE
842 * page while holding the lock. This means the number
843 * of locks we end up holding is never more than a
844 * batch size (~32 entries, at present).
845 *
846 * If we're not using split pte locks, we needn't pin
847 * the PTE pages independently, because we're
848 * protected by the overall pagetable lock.
849 */
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700850 ptl = NULL;
851 if (level == PT_PTE)
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700852 ptl = xen_pte_lock(page, mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700853
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700854 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
855 pfn_pte(pfn, PAGE_KERNEL_RO),
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700856 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
857
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700858 if (ptl) {
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700859 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
860
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700861 /* Queue a deferred unlock for when this batch
862 is completed. */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700863 xen_mc_callback(xen_pte_unlock, ptl);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700864 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700865 }
866
867 return flush;
868}
869
870/* This is called just after a mm has been created, but it has not
871 been used yet. We need to make sure that its pagetable is all
872 read-only, and can be pinned. */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700873static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700874{
Jeremy Fitzhardinge5f94fb52010-12-17 15:31:23 -0800875 trace_xen_mmu_pgd_pin(mm, pgd);
876
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700877 xen_mc_batch();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700878
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000879 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
Jeremy Fitzhardinged05fdf32008-10-28 19:23:06 +1100880 /* re-enable interrupts for flushing */
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700881 xen_mc_issue(0);
Jeremy Fitzhardinged05fdf32008-10-28 19:23:06 +1100882
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700883 kmap_flush_unused();
Jeremy Fitzhardinged05fdf32008-10-28 19:23:06 +1100884
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700885 xen_mc_batch();
886 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700887
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700888#ifdef CONFIG_X86_64
889 {
890 pgd_t *user_pgd = xen_get_user_pgd(pgd);
891
892 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
893
894 if (user_pgd) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700895 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
Tejf63c2f22008-12-16 11:56:06 -0800896 xen_do_pin(MMUEXT_PIN_L4_TABLE,
897 PFN_DOWN(__pa(user_pgd)));
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700898 }
899 }
900#else /* CONFIG_X86_32 */
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700901#ifdef CONFIG_X86_PAE
902 /* Need to make sure unshared kernel PMD is pinnable */
Jeremy Fitzhardinge47cb2ed2008-11-06 13:48:24 -0800903 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700904 PT_PMD);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700905#endif
Jeremy Fitzhardinge28499142008-05-09 12:05:57 +0100906 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700907#endif /* CONFIG_X86_64 */
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700908 xen_mc_issue(0);
909}
910
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700911static void xen_pgd_pin(struct mm_struct *mm)
912{
913 __xen_pgd_pin(mm, mm->pgd);
914}
915
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100916/*
917 * On save, we need to pin all pagetables to make sure they get their
918 * mfns turned into pfns. Search the list for any unpinned pgds and pin
919 * them (unpinned pgds are not currently in use, probably because the
920 * process is under construction or destruction).
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700921 *
922 * Expected to be called in stop_machine() ("equivalent to taking
923 * every spinlock in the system"), so the locking doesn't really
924 * matter all that much.
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100925 */
926void xen_mm_pin_all(void)
927{
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100928 struct page *page;
929
Andrea Arcangelia79e53d2011-02-16 15:45:22 -0800930 spin_lock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100931
932 list_for_each_entry(page, &pgd_list, lru) {
933 if (!PagePinned(page)) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700934 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100935 SetPageSavePinned(page);
936 }
937 }
938
Andrea Arcangelia79e53d2011-02-16 15:45:22 -0800939 spin_unlock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100940}
941
Eduardo Habkostc1f2f092008-07-08 15:06:24 -0700942/*
943 * The init_mm pagetable is really pinned as soon as its created, but
944 * that's before we have page structures to store the bits. So do all
945 * the book-keeping now.
946 */
Daniel Kiper3f5089532011-05-12 17:19:53 -0400947static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700948 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700949{
950 SetPagePinned(page);
951 return 0;
952}
953
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -0700954static void __init xen_mark_init_mm_pinned(void)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700955{
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700956 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700957}
958
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700959static int xen_unpin_page(struct mm_struct *mm, struct page *page,
960 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700961{
Christoph Lameterd60cd462008-04-28 02:12:51 -0700962 unsigned pgfl = TestClearPagePinned(page);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700963
964 if (pgfl && !PageHighMem(page)) {
965 void *pt = lowmem_page_address(page);
966 unsigned long pfn = page_to_pfn(page);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700967 spinlock_t *ptl = NULL;
968 struct multicall_space mcs;
969
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700970 /*
971 * Do the converse to pin_page. If we're using split
972 * pte locks, we must be holding the lock for while
973 * the pte page is unpinned but still RO to prevent
974 * concurrent updates from seeing it in this
975 * partially-pinned state.
976 */
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700977 if (level == PT_PTE) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700978 ptl = xen_pte_lock(page, mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700979
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700980 if (ptl)
981 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700982 }
983
984 mcs = __xen_mc_entry(0);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700985
986 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
987 pfn_pte(pfn, PAGE_KERNEL),
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700988 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
989
990 if (ptl) {
991 /* unlock when batch completed */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700992 xen_mc_callback(xen_pte_unlock, ptl);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700993 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700994 }
995
996 return 0; /* never need to flush on unpin */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700997}
998
999/* Release a pagetables pages back as normal RW */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001000static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001001{
Jeremy Fitzhardinge5f94fb52010-12-17 15:31:23 -08001002 trace_xen_mmu_pgd_unpin(mm, pgd);
1003
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001004 xen_mc_batch();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001005
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001006 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001007
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -07001008#ifdef CONFIG_X86_64
1009 {
1010 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1011
1012 if (user_pgd) {
Tejf63c2f22008-12-16 11:56:06 -08001013 xen_do_pin(MMUEXT_UNPIN_TABLE,
1014 PFN_DOWN(__pa(user_pgd)));
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001015 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -07001016 }
1017 }
1018#endif
1019
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -07001020#ifdef CONFIG_X86_PAE
1021 /* Need to make sure unshared kernel PMD is unpinned */
Jeremy Fitzhardinge47cb2ed2008-11-06 13:48:24 -08001022 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001023 PT_PMD);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -07001024#endif
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -07001025
Ian Campbell86bbc2c2008-11-21 10:21:33 +00001026 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001027
1028 xen_mc_issue(0);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001029}
1030
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001031static void xen_pgd_unpin(struct mm_struct *mm)
1032{
1033 __xen_pgd_unpin(mm, mm->pgd);
1034}
1035
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001036/*
1037 * On resume, undo any pinning done at save, so that the rest of the
1038 * kernel doesn't see any unexpected pinned pagetables.
1039 */
1040void xen_mm_unpin_all(void)
1041{
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001042 struct page *page;
1043
Andrea Arcangelia79e53d2011-02-16 15:45:22 -08001044 spin_lock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001045
1046 list_for_each_entry(page, &pgd_list, lru) {
1047 if (PageSavePinned(page)) {
1048 BUG_ON(!PagePinned(page));
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001049 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001050 ClearPageSavePinned(page);
1051 }
1052 }
1053
Andrea Arcangelia79e53d2011-02-16 15:45:22 -08001054 spin_unlock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001055}
1056
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -08001057static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001058{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001059 spin_lock(&next->page_table_lock);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001060 xen_pgd_pin(next);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001061 spin_unlock(&next->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001062}
1063
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -08001064static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001065{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001066 spin_lock(&mm->page_table_lock);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001067 xen_pgd_pin(mm);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001068 spin_unlock(&mm->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001069}
1070
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001071
1072#ifdef CONFIG_SMP
1073/* Another cpu may still have their %cr3 pointing at the pagetable, so
1074 we need to repoint it somewhere else before we can unpin it. */
1075static void drop_other_mm_ref(void *info)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001076{
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001077 struct mm_struct *mm = info;
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001078 struct mm_struct *active_mm;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001079
Alex Shi2113f462012-01-13 23:53:35 +08001080 active_mm = this_cpu_read(cpu_tlbstate.active_mm);
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001081
Alex Shi2113f462012-01-13 23:53:35 +08001082 if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001083 leave_mm(smp_processor_id());
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001084
1085 /* If this cpu still has a stale cr3 reference, then make sure
1086 it has been flushed. */
Alex Shi2113f462012-01-13 23:53:35 +08001087 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001088 load_cr3(swapper_pg_dir);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001089}
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001090
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001091static void xen_drop_mm_ref(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001092{
Mike Travise4d98202008-12-16 17:34:05 -08001093 cpumask_var_t mask;
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001094 unsigned cpu;
1095
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001096 if (current->active_mm == mm) {
1097 if (current->mm == mm)
1098 load_cr3(swapper_pg_dir);
1099 else
1100 leave_mm(smp_processor_id());
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001101 }
1102
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001103 /* Get the "official" set of cpus referring to our pagetable. */
Mike Travise4d98202008-12-16 17:34:05 -08001104 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1105 for_each_online_cpu(cpu) {
Rusty Russell78f1c4d2009-09-24 09:34:51 -06001106 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
Mike Travise4d98202008-12-16 17:34:05 -08001107 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1108 continue;
1109 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1110 }
1111 return;
1112 }
Rusty Russell78f1c4d2009-09-24 09:34:51 -06001113 cpumask_copy(mask, mm_cpumask(mm));
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001114
1115 /* It's possible that a vcpu may have a stale reference to our
1116 cr3, because its in lazy mode, and it hasn't yet flushed
1117 its set of pending hypercalls yet. In this case, we can
1118 look at its actual current cr3 value, and force it to flush
1119 if needed. */
1120 for_each_online_cpu(cpu) {
1121 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
Mike Travise4d98202008-12-16 17:34:05 -08001122 cpumask_set_cpu(cpu, mask);
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001123 }
1124
Mike Travise4d98202008-12-16 17:34:05 -08001125 if (!cpumask_empty(mask))
1126 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1127 free_cpumask_var(mask);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001128}
1129#else
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001130static void xen_drop_mm_ref(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001131{
1132 if (current->active_mm == mm)
1133 load_cr3(swapper_pg_dir);
1134}
1135#endif
1136
1137/*
1138 * While a process runs, Xen pins its pagetables, which means that the
1139 * hypervisor forces it to be read-only, and it controls all updates
1140 * to it. This means that all pagetable updates have to go via the
1141 * hypervisor, which is moderately expensive.
1142 *
1143 * Since we're pulling the pagetable down, we switch to use init_mm,
1144 * unpin old process pagetable and mark it all read-write, which
1145 * allows further operations on it to be simple memory accesses.
1146 *
1147 * The only subtle point is that another CPU may be still using the
1148 * pagetable because of lazy tlb flushing. This means we need need to
1149 * switch all CPUs off this pagetable before we can unpin it.
1150 */
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -08001151static void xen_exit_mmap(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001152{
1153 get_cpu(); /* make sure we don't move around */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001154 xen_drop_mm_ref(mm);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001155 put_cpu();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001156
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -07001157 spin_lock(&mm->page_table_lock);
Jeremy Fitzhardingedf912ea2007-09-25 11:50:00 -07001158
1159 /* pgd may not be pinned in the error exit path of execve */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001160 if (xen_page_pinned(mm->pgd))
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001161 xen_pgd_unpin(mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001162
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -07001163 spin_unlock(&mm->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001164}
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -07001165
Daniel Kiper3f5089532011-05-12 17:19:53 -04001166static void __init xen_pagetable_setup_start(pgd_t *base)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001167{
1168}
1169
Stefano Stabellini279b7062011-04-14 15:49:41 +01001170static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
1171{
1172 /* reserve the range used */
1173 native_pagetable_reserve(start, end);
1174
1175	/* set the rest of the range as RW */
1176 printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
1177 PFN_PHYS(pgt_buf_top));
1178 while (end < PFN_PHYS(pgt_buf_top)) {
1179 make_lowmem_page_readwrite(__va(end));
1180 end += PAGE_SIZE;
1181 }
1182}
1183
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001184static void xen_post_allocator_init(void);
1185
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001186#ifdef CONFIG_X86_64
1187static void __init xen_cleanhighmap(unsigned long vaddr,
1188 unsigned long vaddr_end)
1189{
1190 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1191 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1192
1193 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1194 * We include the PMD passed in on _both_ boundaries. */
1195 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
1196 pmd++, vaddr += PMD_SIZE) {
1197 if (pmd_none(*pmd))
1198 continue;
1199 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1200 set_pmd(pmd, __pmd(0));
1201 }
1202	/* In case we did something silly, we should crash in this function
1203	 * instead of somewhere later, where it would be confusing. */
1204 xen_mc_flush();
1205}
1206#endif
Daniel Kiper3f5089532011-05-12 17:19:53 -04001207static void __init xen_pagetable_setup_done(pgd_t *base)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001208{
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001209#ifdef CONFIG_X86_64
1210 unsigned long size;
1211 unsigned long addr;
1212#endif
1213
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001214 xen_setup_shared_info();
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001215#ifdef CONFIG_X86_64
1216 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1217 unsigned long new_mfn_list;
1218
1219 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1220
1221 /* On 32-bit, we get zero so this never gets executed. */
1222 new_mfn_list = xen_revector_p2m_tree();
1223 if (new_mfn_list && new_mfn_list != xen_start_info->mfn_list) {
1224 /* using __ka address and sticking INVALID_P2M_ENTRY! */
1225 memset((void *)xen_start_info->mfn_list, 0xff, size);
1226
1227 /* We should be in __ka space. */
1228 BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
1229 addr = xen_start_info->mfn_list;
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001230			/* We round up to the PMD, which means that if anybody at this stage is
1231			 * using the __ka address of xen_start_info or xen_start_info->shared_info
1232			 * they are going to crash. Fortunately we have already revectored
1233			 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
1234 size = roundup(size, PMD_SIZE);
1235 xen_cleanhighmap(addr, addr + size);
1236
Konrad Rzeszutek Wilk785f6232012-08-14 16:37:31 -04001237 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001238 memblock_free(__pa(xen_start_info->mfn_list), size);
1239 /* And revector! Bye bye old array */
1240 xen_start_info->mfn_list = new_mfn_list;
1241 }
1242 }
Konrad Rzeszutek Wilk3aca7fb2012-08-14 14:34:00 -04001243 /* At this stage, cleanup_highmap has already cleaned __ka space
1244 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1245 * the ramdisk). We continue on, erasing PMD entries that point to page
1246 * tables - do note that they are accessible at this stage via __va.
1247	 * For good measure we also round up to the PMD - which means that if
1248	 * anybody is still using a __ka address to the initial boot stack and
1249	 * tries to use it, they are going to crash. The xen_start_info has been
1250	 * taken care of already in xen_setup_kernel_pagetable. */
1251 addr = xen_start_info->pt_base;
1252 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1253
1254 xen_cleanhighmap(addr, addr + size);
1255 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1256#ifdef DEBUG
1257	/* This is superfluous and not strictly necessary, but let's do it
1258	 * anyway. The MODULES_VADDR -> MODULES_END range should be clear of
1259	 * anything at this stage. */
1260 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1261#endif
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001262#endif
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001263 xen_post_allocator_init();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001264}
1265
1266static void xen_write_cr2(unsigned long cr2)
1267{
Alex Shi2113f462012-01-13 23:53:35 +08001268 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001269}
1270
1271static unsigned long xen_read_cr2(void)
1272{
Alex Shi2113f462012-01-13 23:53:35 +08001273 return this_cpu_read(xen_vcpu)->arch.cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001274}
1275
1276unsigned long xen_read_cr2_direct(void)
1277{
Alex Shi2113f462012-01-13 23:53:35 +08001278 return this_cpu_read(xen_vcpu_info.arch.cr2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001279}
1280
1281static void xen_flush_tlb(void)
1282{
1283 struct mmuext_op *op;
1284 struct multicall_space mcs;
1285
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001286 trace_xen_mmu_flush_tlb(0);
1287
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001288 preempt_disable();
1289
1290 mcs = xen_mc_entry(sizeof(*op));
1291
1292 op = mcs.args;
1293 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1294 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1295
1296 xen_mc_issue(PARAVIRT_LAZY_MMU);
1297
1298 preempt_enable();
1299}
1300
1301static void xen_flush_tlb_single(unsigned long addr)
1302{
1303 struct mmuext_op *op;
1304 struct multicall_space mcs;
1305
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001306 trace_xen_mmu_flush_tlb_single(addr);
1307
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001308 preempt_disable();
1309
1310 mcs = xen_mc_entry(sizeof(*op));
1311 op = mcs.args;
1312 op->cmd = MMUEXT_INVLPG_LOCAL;
1313 op->arg1.linear_addr = addr & PAGE_MASK;
1314 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1315
1316 xen_mc_issue(PARAVIRT_LAZY_MMU);
1317
1318 preempt_enable();
1319}
1320
1321static void xen_flush_tlb_others(const struct cpumask *cpus,
1322 struct mm_struct *mm, unsigned long va)
1323{
1324 struct {
1325 struct mmuext_op op;
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001326#ifdef CONFIG_SMP
Andrew Jones900cba82009-12-18 10:31:31 +01001327 DECLARE_BITMAP(mask, num_processors);
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001328#else
1329 DECLARE_BITMAP(mask, NR_CPUS);
1330#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001331 } *args;
1332 struct multicall_space mcs;
1333
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001334 trace_xen_mmu_flush_tlb_others(cpus, mm, va);
1335
Jeremy Fitzhardingee3f8a742009-03-04 17:36:57 -08001336 if (cpumask_empty(cpus))
1337 return; /* nothing to do */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001338
1339 mcs = xen_mc_entry(sizeof(*args));
1340 args = mcs.args;
1341 args->op.arg2.vcpumask = to_cpumask(args->mask);
1342
1343	/* Remove us, and any offline CPUs. */
1344 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1345 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001346
1347 if (va == TLB_FLUSH_ALL) {
1348 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1349 } else {
1350 args->op.cmd = MMUEXT_INVLPG_MULTI;
1351 args->op.arg1.linear_addr = va;
1352 }
1353
1354 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1355
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001356 xen_mc_issue(PARAVIRT_LAZY_MMU);
1357}
1358
1359static unsigned long xen_read_cr3(void)
1360{
Alex Shi2113f462012-01-13 23:53:35 +08001361 return this_cpu_read(xen_cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001362}
1363
1364static void set_current_cr3(void *v)
1365{
Alex Shi2113f462012-01-13 23:53:35 +08001366 this_cpu_write(xen_current_cr3, (unsigned long)v);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001367}
1368
1369static void __xen_write_cr3(bool kernel, unsigned long cr3)
1370{
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001371 struct mmuext_op op;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001372 unsigned long mfn;
1373
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001374 trace_xen_mmu_write_cr3(kernel, cr3);
1375
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001376 if (cr3)
1377 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1378 else
1379 mfn = 0;
1380
1381 WARN_ON(mfn == 0 && kernel);
1382
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001383 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1384 op.arg1.mfn = mfn;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001385
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001386 xen_extend_mmuext_op(&op);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001387
1388 if (kernel) {
Alex Shi2113f462012-01-13 23:53:35 +08001389 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001390
1391 /* Update xen_current_cr3 once the batch has actually
1392 been submitted. */
1393 xen_mc_callback(set_current_cr3, (void *)cr3);
1394 }
1395}
1396
1397static void xen_write_cr3(unsigned long cr3)
1398{
1399 BUG_ON(preemptible());
1400
1401 xen_mc_batch(); /* disables interrupts */
1402
1403	/* Update while interrupts are disabled, so it's atomic with
1404 respect to ipis */
Alex Shi2113f462012-01-13 23:53:35 +08001405 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001406
1407 __xen_write_cr3(true, cr3);
1408
1409#ifdef CONFIG_X86_64
1410 {
1411 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1412 if (user_pgd)
1413 __xen_write_cr3(false, __pa(user_pgd));
1414 else
1415 __xen_write_cr3(false, 0);
1416 }
1417#endif
1418
1419 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1420}
1421
1422static int xen_pgd_alloc(struct mm_struct *mm)
1423{
1424 pgd_t *pgd = mm->pgd;
1425 int ret = 0;
1426
1427 BUG_ON(PagePinned(virt_to_page(pgd)));
1428
1429#ifdef CONFIG_X86_64
1430 {
1431 struct page *page = virt_to_page(pgd);
1432 pgd_t *user_pgd;
1433
1434 BUG_ON(page->private != 0);
1435
1436 ret = -ENOMEM;
1437
1438 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1439 page->private = (unsigned long)user_pgd;
1440
1441 if (user_pgd != NULL) {
1442 user_pgd[pgd_index(VSYSCALL_START)] =
1443 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1444 ret = 0;
1445 }
1446
1447 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1448 }
1449#endif
1450
1451 return ret;
1452}
1453
1454static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1455{
1456#ifdef CONFIG_X86_64
1457 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1458
1459 if (user_pgd)
1460 free_page((unsigned long)user_pgd);
1461#endif
1462}
1463
Stefano Stabelliniee176452011-04-19 14:47:31 +01001464#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001465static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001466{
1467 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1468 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1469 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1470 pte_val_ma(pte));
Stefano Stabelliniee176452011-04-19 14:47:31 +01001471
1472 return pte;
1473}
1474#else /* CONFIG_X86_64 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001475static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Stefano Stabelliniee176452011-04-19 14:47:31 +01001476{
1477 unsigned long pfn = pte_pfn(pte);
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001478
1479 /*
1480 * If the new pfn is within the range of the newly allocated
1481 * kernel pagetable, and it isn't being mapped into an
Stefano Stabellinid8aa5ec2011-03-09 14:22:05 +00001482 * early_ioremap fixmap slot as a freshly allocated page, make sure
1483 * it is RO.
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001484 */
Stefano Stabellinid8aa5ec2011-03-09 14:22:05 +00001485 if (((!is_early_ioremap_ptep(ptep) &&
Stefano Stabellinib9269dc2011-04-12 12:19:49 +01001486 pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
Stefano Stabellinid8aa5ec2011-03-09 14:22:05 +00001487 (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001488 pte = pte_wrprotect(pte);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001489
1490 return pte;
1491}
Stefano Stabelliniee176452011-04-19 14:47:31 +01001492#endif /* CONFIG_X86_64 */
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001493
1494/* Init-time set_pte while constructing initial pagetables, which
1495 doesn't allow RO pagetable pages to be remapped RW */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001496static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001497{
1498 pte = mask_rw_pte(ptep, pte);
1499
1500 xen_set_pte(ptep, pte);
1501}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001502
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001503static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1504{
1505 struct mmuext_op op;
1506 op.cmd = cmd;
1507 op.arg1.mfn = pfn_to_mfn(pfn);
1508 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1509 BUG();
1510}
1511
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001512/* Early in boot, while setting up the initial pagetable, assume
1513 everything is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001514static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001515{
1516#ifdef CONFIG_FLATMEM
1517 BUG_ON(mem_map); /* should only be used early */
1518#endif
1519 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001520 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1521}
1522
1523/* Used for pmd and pud */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001524static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001525{
1526#ifdef CONFIG_FLATMEM
1527 BUG_ON(mem_map); /* should only be used early */
1528#endif
1529 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001530}
1531
1532/* Early release_pte assumes that all pts are pinned, since there's
1533 only init_mm and anything attached to that is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001534static void __init xen_release_pte_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001535{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001536 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001537 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1538}
1539
Daniel Kiper3f5089532011-05-12 17:19:53 -04001540static void __init xen_release_pmd_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001541{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001542 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001543}
1544
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001545static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1546{
1547 struct multicall_space mcs;
1548 struct mmuext_op *op;
1549
1550 mcs = __xen_mc_entry(sizeof(*op));
1551 op = mcs.args;
1552 op->cmd = cmd;
1553 op->arg1.mfn = pfn_to_mfn(pfn);
1554
1555 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1556}
1557
1558static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1559{
1560 struct multicall_space mcs;
1561 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1562
1563 mcs = __xen_mc_entry(0);
1564 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1565 pfn_pte(pfn, prot), 0);
1566}
1567
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001568/* This needs to make sure the new pte page is pinned iff it's being
1569 attached to a pinned pagetable. */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001570static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1571 unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001572{
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001573 bool pinned = PagePinned(virt_to_page(mm->pgd));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001574
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001575 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001576
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001577 if (pinned) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001578 struct page *page = pfn_to_page(pfn);
1579
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001580 SetPagePinned(page);
1581
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001582 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001583 xen_mc_batch();
1584
1585 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1586
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001587 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001588 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1589
1590 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001591 } else {
1592 /* make sure there are no stray mappings of
1593 this page */
1594 kmap_flush_unused();
1595 }
1596 }
1597}
1598
1599static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1600{
1601 xen_alloc_ptpage(mm, pfn, PT_PTE);
1602}
1603
1604static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1605{
1606 xen_alloc_ptpage(mm, pfn, PT_PMD);
1607}
1608
1609/* This should never happen until we're OK to use struct page */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001610static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001611{
1612 struct page *page = pfn_to_page(pfn);
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001613 bool pinned = PagePinned(page);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001614
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001615 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1616
1617 if (pinned) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001618 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001619 xen_mc_batch();
1620
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001621 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001622 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1623
1624 __set_pfn_prot(pfn, PAGE_KERNEL);
1625
1626 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001627 }
1628 ClearPagePinned(page);
1629 }
1630}
1631
1632static void xen_release_pte(unsigned long pfn)
1633{
1634 xen_release_ptpage(pfn, PT_PTE);
1635}
1636
1637static void xen_release_pmd(unsigned long pfn)
1638{
1639 xen_release_ptpage(pfn, PT_PMD);
1640}
1641
1642#if PAGETABLE_LEVELS == 4
1643static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1644{
1645 xen_alloc_ptpage(mm, pfn, PT_PUD);
1646}
1647
1648static void xen_release_pud(unsigned long pfn)
1649{
1650 xen_release_ptpage(pfn, PT_PUD);
1651}
1652#endif
1653
1654void __init xen_reserve_top(void)
1655{
1656#ifdef CONFIG_X86_32
1657 unsigned long top = HYPERVISOR_VIRT_START;
1658 struct xen_platform_parameters pp;
1659
1660 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1661 top = pp.virt_start;
1662
1663 reserve_top_address(-top);
1664#endif /* CONFIG_X86_32 */
1665}
1666
1667/*
1668 * Like __va(), but returns address in the kernel mapping (which is
1669 * all we have until the physical memory mapping has been set up).
1670 */
1671static void *__ka(phys_addr_t paddr)
1672{
1673#ifdef CONFIG_X86_64
1674 return (void *)(paddr + __START_KERNEL_map);
1675#else
1676 return __va(paddr);
1677#endif
1678}
1679
1680/* Convert a machine address to physical address */
1681static unsigned long m2p(phys_addr_t maddr)
1682{
1683 phys_addr_t paddr;
1684
1685 maddr &= PTE_PFN_MASK;
1686 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1687
1688 return paddr;
1689}
1690
1691/* Convert a machine address to kernel virtual */
1692static void *m2v(phys_addr_t maddr)
1693{
1694 return __ka(m2p(maddr));
1695}
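/*
 * Editor's illustrative sketch, not part of the original file: how the
 * helpers above are typically combined.  A pmd entry read from a Xen-built
 * pagetable holds an mfn, so m2v() hands back the kernel-virtual address of
 * the pte page it points to (this mirrors the use in xen_map_identity_early()
 * below).  The helper name and its argument are hypothetical.
 */
static inline pte_t *example_pte_page_of(pmd_t pmd_entry)
{
	/* m2p() strips the flag bits and turns the mfn into a pfn,
	   __ka() then gives the kernel-mapping virtual address */
	return (pte_t *)m2v(pmd_entry.pmd);
}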
1696
Juan Quintela4ec53872010-09-02 15:45:43 +01001697/* Set the page permissions on an identity-mapped page */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001698static void set_page_prot(void *addr, pgprot_t prot)
1699{
1700 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1701 pte_t pte = pfn_pte(pfn, prot);
1702
1703 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1704 BUG();
1705}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001706#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001707static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001708{
1709 unsigned pmdidx, pteidx;
1710 unsigned ident_pte;
1711 unsigned long pfn;
1712
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001713 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1714 PAGE_SIZE);
1715
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001716 ident_pte = 0;
1717 pfn = 0;
1718 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1719 pte_t *pte_page;
1720
1721 /* Reuse or allocate a page of ptes */
1722 if (pmd_present(pmd[pmdidx]))
1723 pte_page = m2v(pmd[pmdidx].pmd);
1724 else {
1725 /* Check for free pte pages */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001726 if (ident_pte == LEVEL1_IDENT_ENTRIES)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001727 break;
1728
1729 pte_page = &level1_ident_pgt[ident_pte];
1730 ident_pte += PTRS_PER_PTE;
1731
1732 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1733 }
1734
1735 /* Install mappings */
1736 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1737 pte_t pte;
1738
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001739#ifdef CONFIG_X86_32
1740 if (pfn > max_pfn_mapped)
1741 max_pfn_mapped = pfn;
1742#endif
1743
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001744 if (!pte_none(pte_page[pteidx]))
1745 continue;
1746
1747 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1748 pte_page[pteidx] = pte;
1749 }
1750 }
1751
1752 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1753 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1754
1755 set_page_prot(pmd, PAGE_KERNEL_RO);
1756}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001757#endif
Ian Campbell7e775062010-09-30 12:37:26 +01001758void __init xen_setup_machphys_mapping(void)
1759{
1760 struct xen_machphys_mapping mapping;
Ian Campbell7e775062010-09-30 12:37:26 +01001761
1762 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1763 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
Jan Beulichccbcdf72011-08-16 15:07:41 +01001764 machine_to_phys_nr = mapping.max_mfn + 1;
Ian Campbell7e775062010-09-30 12:37:26 +01001765 } else {
Jan Beulichccbcdf72011-08-16 15:07:41 +01001766 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
Ian Campbell7e775062010-09-30 12:37:26 +01001767 }
Jan Beulichccbcdf72011-08-16 15:07:41 +01001768#ifdef CONFIG_X86_32
Jan Beulich61cca2f2011-09-15 08:52:40 +01001769 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1770 < machine_to_phys_mapping);
Jan Beulichccbcdf72011-08-16 15:07:41 +01001771#endif
Ian Campbell7e775062010-09-30 12:37:26 +01001772}
1773
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001774#ifdef CONFIG_X86_64
1775static void convert_pfn_mfn(void *v)
1776{
1777 pte_t *pte = v;
1778 int i;
1779
1780 /* All levels are converted the same way, so just treat them
1781 as ptes. */
1782 for (i = 0; i < PTRS_PER_PTE; i++)
1783 pte[i] = xen_make_pte(pte[i].pte);
1784}
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001785static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1786 unsigned long addr)
1787{
1788 if (*pt_base == PFN_DOWN(__pa(addr))) {
1789 set_page_prot((void *)addr, PAGE_KERNEL);
1790 clear_page((void *)addr);
1791 (*pt_base)++;
1792 }
1793 if (*pt_end == PFN_DOWN(__pa(addr))) {
1794 set_page_prot((void *)addr, PAGE_KERNEL);
1795 clear_page((void *)addr);
1796 (*pt_end)--;
1797 }
1798}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001799/*
Lucas De Marchi0d2eb442011-03-17 16:24:16 -03001800 * Set up the initial kernel pagetable.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001801 *
1802 * We can construct this by grafting the Xen provided pagetable into
1803 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1804 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1805 * means that only the kernel has a physical mapping to start with -
1806 * but that's enough to get __va working. We need to fill in the rest
1807 * of the physical mapping once some sort of allocator has been set
1808 * up.
1809 */
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04001810void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001811{
1812 pud_t *l3;
1813 pmd_t *l2;
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001814 unsigned long addr[3];
1815 unsigned long pt_base, pt_end;
1816 unsigned i;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001817
Stefano Stabellini14988a42011-02-18 11:32:40 +00001818 /* max_pfn_mapped is the last pfn mapped in the initial memory
1819 * mappings. Considering that on Xen after the kernel mappings we
1820 * have the mappings of some pages that don't exist in pfn space, we
1821 * set max_pfn_mapped to the last real pfn mapped. */
1822 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1823
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001824 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1825 pt_end = pt_base + xen_start_info->nr_pt_frames;
1826
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001827 /* Zap identity mapping */
1828 init_level4_pgt[0] = __pgd(0);
1829
1830 /* Pre-constructed entries are in pfn, so convert to mfn */
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001831 /* L4[272] -> level3_ident_pgt
1832 * L4[511] -> level3_kernel_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001833 convert_pfn_mfn(init_level4_pgt);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001834
1835 /* L3_i[0] -> level2_ident_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001836 convert_pfn_mfn(level3_ident_pgt);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001837 /* L3_k[510] -> level2_kernel_pgt
1838	 * L3_k[511] -> level2_fixmap_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001839 convert_pfn_mfn(level3_kernel_pgt);
1840
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001841 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001842 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1843 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1844
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001845 addr[0] = (unsigned long)pgd;
1846 addr[1] = (unsigned long)l3;
1847 addr[2] = (unsigned long)l2;
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001848	/* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
1849	 * Both L4[272][0] and L4[511][511] have entries that point to the same
1850	 * L2 (PMD) tables. Meaning that if you modify it in __va space
1851	 * it will also be modified in the __ka space! (But if you just
1852	 * modify the PMD table to point to other PTEs or none, then you
1853	 * are OK - which is what cleanup_highmap does) */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001854 copy_page(level2_ident_pgt, l2);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001855 /* Graft it onto L4[511][511] */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001856 copy_page(level2_kernel_pgt, l2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001857
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001858 /* Get [511][510] and graft that in level2_fixmap_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001859 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1860 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001861 copy_page(level2_fixmap_pgt, l2);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001862 /* Note that we don't do anything with level1_fixmap_pgt which
1863 * we don't need. */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001864
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001865 /* Make pagetable pieces RO */
1866 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1867 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1868 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1869 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001870 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001871 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1872 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1873
1874 /* Pin down new L4 */
1875 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1876 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1877
1878 /* Unpin Xen-provided one */
1879 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1880
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001881 /*
1882 * At this stage there can be no user pgd, and no page
1883 * structure to attach it to, so make sure we just set kernel
1884 * pgd.
1885 */
1886 xen_mc_batch();
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001887 __xen_write_cr3(true, __pa(init_level4_pgt));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001888 xen_mc_issue(PARAVIRT_LAZY_CPU);
1889
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001890	/* We can't easily rip out the L3 and L2, as the Xen pagetables are
1891	 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
1892	 * the initial domain. For guests started by the toolstack, they are
1893	 * in [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
1894	 * rip out the [L4] (pgd), but for guests we shave off three pages.
1895 */
1896 for (i = 0; i < ARRAY_SIZE(addr); i++)
1897 check_pt_base(&pt_base, &pt_end, addr[i]);
1898
1899	/* Reserve the Xen pagetable we are still using, now up to three pages smaller */
1900 memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001901 /* Revector the xen_start_info */
1902 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001903}
1904#else /* !CONFIG_X86_64 */
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001905static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1906static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1907
Daniel Kiper3f5089532011-05-12 17:19:53 -04001908static void __init xen_write_cr3_init(unsigned long cr3)
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001909{
1910 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
1911
1912 BUG_ON(read_cr3() != __pa(initial_page_table));
1913 BUG_ON(cr3 != __pa(swapper_pg_dir));
1914
1915 /*
1916 * We are switching to swapper_pg_dir for the first time (from
1917 * initial_page_table) and therefore need to mark that page
1918 * read-only and then pin it.
1919 *
1920 * Xen disallows sharing of kernel PMDs for PAE
1921 * guests. Therefore we must copy the kernel PMD from
1922 * initial_page_table into a new kernel PMD to be used in
1923 * swapper_pg_dir.
1924 */
1925 swapper_kernel_pmd =
1926 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001927 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001928 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
1929 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
1930 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
1931
1932 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1933 xen_write_cr3(cr3);
1934 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
1935
1936 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
1937 PFN_DOWN(__pa(initial_page_table)));
1938 set_page_prot(initial_page_table, PAGE_KERNEL);
1939 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
1940
1941 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1942}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001943
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04001944void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001945{
1946 pmd_t *kernel_pmd;
1947
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001948 initial_kernel_pmd =
1949 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Jeremy Fitzhardingef0991802010-08-26 16:16:28 -07001950
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001951 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
1952 xen_start_info->nr_pt_frames * PAGE_SIZE +
1953 512*1024);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001954
1955 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001956 copy_page(initial_kernel_pmd, kernel_pmd);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001957
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001958 xen_map_identity_early(initial_kernel_pmd, max_pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001959
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001960 copy_page(initial_page_table, pgd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001961 initial_page_table[KERNEL_PGD_BOUNDARY] =
1962 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001963
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001964 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
1965 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001966 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
1967
1968 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1969
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001970 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
1971 PFN_DOWN(__pa(initial_page_table)));
1972 xen_write_cr3(__pa(initial_page_table));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001973
Tejun Heo24aa0782011-07-12 11:16:06 +02001974 memblock_reserve(__pa(xen_start_info->pt_base),
Konrad Rzeszutek Wilkdc6821e2012-01-07 21:27:38 -05001975 xen_start_info->nr_pt_frames * PAGE_SIZE);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001976}
1977#endif /* CONFIG_X86_64 */
1978
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01001979static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
1980
Masami Hiramatsu3b3809a2009-04-09 10:55:33 -07001981static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001982{
1983 pte_t pte;
1984
1985 phys >>= PAGE_SHIFT;
1986
1987 switch (idx) {
1988 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
1989#ifdef CONFIG_X86_F00F_BUG
1990 case FIX_F00F_IDT:
1991#endif
1992#ifdef CONFIG_X86_32
1993 case FIX_WP_TEST:
1994 case FIX_VDSO:
1995# ifdef CONFIG_HIGHMEM
1996 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
1997# endif
1998#else
1999 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
Andy Lutomirski5d5791a2011-08-03 09:31:52 -04002000 case VVAR_PAGE:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002001#endif
Jeremy Fitzhardinge3ecb1b72009-03-07 23:48:41 -08002002 case FIX_TEXT_POKE0:
2003 case FIX_TEXT_POKE1:
2004 /* All local page mappings */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002005 pte = pfn_pte(phys, prot);
2006 break;
2007
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002008#ifdef CONFIG_X86_LOCAL_APIC
2009 case FIX_APIC_BASE: /* maps dummy local APIC */
2010 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2011 break;
2012#endif
2013
2014#ifdef CONFIG_X86_IO_APIC
2015 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2016 /*
2017		 * We just don't map the IO APIC - all access is via
2018		 * hypercalls. Map the dummy page instead of the real IO APIC.
2019		 */
Konrad Rzeszutek Wilk27abd142012-04-16 13:53:40 -04002020 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002021 break;
2022#endif
2023
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002024 case FIX_PARAVIRT_BOOTMAP:
2025 /* This is an MFN, but it isn't an IO mapping from the
2026 IO domain */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002027 pte = mfn_pte(phys, prot);
2028 break;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002029
2030 default:
2031 /* By default, set_fixmap is used for hardware mappings */
2032 pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
2033 break;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002034 }
2035
2036 __native_set_fixmap(idx, pte);
2037
2038#ifdef CONFIG_X86_64
2039 /* Replicate changes to map the vsyscall page into the user
2040 pagetable vsyscall mapping. */
Andy Lutomirski5d5791a2011-08-03 09:31:52 -04002041 if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
2042 idx == VVAR_PAGE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002043 unsigned long vaddr = __fix_to_virt(idx);
2044 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2045 }
2046#endif
2047}
2048
Daniel Kiper3f5089532011-05-12 17:19:53 -04002049static void __init xen_post_allocator_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002050{
2051 pv_mmu_ops.set_pte = xen_set_pte;
2052 pv_mmu_ops.set_pmd = xen_set_pmd;
2053 pv_mmu_ops.set_pud = xen_set_pud;
2054#if PAGETABLE_LEVELS == 4
2055 pv_mmu_ops.set_pgd = xen_set_pgd;
2056#endif
2057
2058 /* This will work as long as patching hasn't happened yet
2059 (which it hasn't) */
2060 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2061 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2062 pv_mmu_ops.release_pte = xen_release_pte;
2063 pv_mmu_ops.release_pmd = xen_release_pmd;
2064#if PAGETABLE_LEVELS == 4
2065 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2066 pv_mmu_ops.release_pud = xen_release_pud;
2067#endif
2068
2069#ifdef CONFIG_X86_64
2070 SetPagePinned(virt_to_page(level3_user_vsyscall));
2071#endif
2072 xen_mark_init_mm_pinned();
2073}
2074
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002075static void xen_leave_lazy_mmu(void)
2076{
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002077 preempt_disable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002078 xen_mc_flush();
2079 paravirt_leave_lazy_mmu();
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002080 preempt_enable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002081}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002082
Daniel Kiper3f5089532011-05-12 17:19:53 -04002083static const struct pv_mmu_ops xen_mmu_ops __initconst = {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002084 .read_cr2 = xen_read_cr2,
2085 .write_cr2 = xen_write_cr2,
2086
2087 .read_cr3 = xen_read_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002088#ifdef CONFIG_X86_32
2089 .write_cr3 = xen_write_cr3_init,
2090#else
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002091 .write_cr3 = xen_write_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002092#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002093
2094 .flush_tlb_user = xen_flush_tlb,
2095 .flush_tlb_kernel = xen_flush_tlb,
2096 .flush_tlb_single = xen_flush_tlb_single,
2097 .flush_tlb_others = xen_flush_tlb_others,
2098
2099 .pte_update = paravirt_nop,
2100 .pte_update_defer = paravirt_nop,
2101
2102 .pgd_alloc = xen_pgd_alloc,
2103 .pgd_free = xen_pgd_free,
2104
2105 .alloc_pte = xen_alloc_pte_init,
2106 .release_pte = xen_release_pte_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002107 .alloc_pmd = xen_alloc_pmd_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002108 .release_pmd = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002109
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002110 .set_pte = xen_set_pte_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002111 .set_pte_at = xen_set_pte_at,
2112 .set_pmd = xen_set_pmd_hyper,
2113
2114 .ptep_modify_prot_start = __ptep_modify_prot_start,
2115 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2116
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002117 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2118 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002119
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002120 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2121 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002122
2123#ifdef CONFIG_X86_PAE
2124 .set_pte_atomic = xen_set_pte_atomic,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002125 .pte_clear = xen_pte_clear,
2126 .pmd_clear = xen_pmd_clear,
2127#endif /* CONFIG_X86_PAE */
2128 .set_pud = xen_set_pud_hyper,
2129
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002130 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2131 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002132
2133#if PAGETABLE_LEVELS == 4
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002134 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2135 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002136 .set_pgd = xen_set_pgd_hyper,
2137
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002138 .alloc_pud = xen_alloc_pmd_init,
2139 .release_pud = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002140#endif /* PAGETABLE_LEVELS == 4 */
2141
2142 .activate_mm = xen_activate_mm,
2143 .dup_mmap = xen_dup_mmap,
2144 .exit_mmap = xen_exit_mmap,
2145
2146 .lazy_mode = {
2147 .enter = paravirt_enter_lazy_mmu,
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002148 .leave = xen_leave_lazy_mmu,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002149 },
2150
2151 .set_fixmap = xen_set_fixmap,
2152};
2153
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002154void __init xen_init_mmu_ops(void)
2155{
Stefano Stabellini279b7062011-04-14 15:49:41 +01002156 x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002157 x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
2158 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
2159 pv_mmu_ops = xen_mmu_ops;
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -07002160
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002161 memset(dummy_mapping, 0xff, PAGE_SIZE);
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002162}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002163
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002164/* Protected by xen_reservation_lock. */
2165#define MAX_CONTIG_ORDER 9 /* 2MB */
2166static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2167
2168#define VOID_PTE (mfn_pte(0, __pgprot(0)))
2169static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2170 unsigned long *in_frames,
2171 unsigned long *out_frames)
2172{
2173 int i;
2174 struct multicall_space mcs;
2175
2176 xen_mc_batch();
2177 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2178 mcs = __xen_mc_entry(0);
2179
2180 if (in_frames)
2181 in_frames[i] = virt_to_mfn(vaddr);
2182
2183 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
Konrad Rzeszutek Wilk6eaa4122011-01-18 20:09:41 -05002184 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002185
2186 if (out_frames)
2187 out_frames[i] = virt_to_pfn(vaddr);
2188 }
2189 xen_mc_issue(0);
2190}
2191
2192/*
2193 * Update the pfn-to-mfn mappings for a virtual address range, either to
2194 * point to an array of mfns, or contiguously from a single starting
2195 * mfn.
2196 */
2197static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2198 unsigned long *mfns,
2199 unsigned long first_mfn)
2200{
2201 unsigned i, limit;
2202 unsigned long mfn;
2203
2204 xen_mc_batch();
2205
2206 limit = 1u << order;
2207 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2208 struct multicall_space mcs;
2209 unsigned flags;
2210
2211 mcs = __xen_mc_entry(0);
2212 if (mfns)
2213 mfn = mfns[i];
2214 else
2215 mfn = first_mfn + i;
2216
2217 if (i < (limit - 1))
2218 flags = 0;
2219 else {
2220 if (order == 0)
2221 flags = UVMF_INVLPG | UVMF_ALL;
2222 else
2223 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2224 }
2225
2226 MULTI_update_va_mapping(mcs.mc, vaddr,
2227 mfn_pte(mfn, PAGE_KERNEL), flags);
2228
2229 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2230 }
2231
2232 xen_mc_issue(0);
2233}
2234
2235/*
2236 * Perform the hypercall to exchange a region of our pfns to point to
2237 * memory with the required contiguous alignment. Takes the pfns as
2238 * input, and populates mfns as output.
2239 *
2240 * Returns a success code indicating whether the hypervisor was able to
2241 * satisfy the request or not.
2242 */
2243static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2244 unsigned long *pfns_in,
2245 unsigned long extents_out,
2246 unsigned int order_out,
2247 unsigned long *mfns_out,
2248 unsigned int address_bits)
2249{
2250 long rc;
2251 int success;
2252
2253 struct xen_memory_exchange exchange = {
2254 .in = {
2255 .nr_extents = extents_in,
2256 .extent_order = order_in,
2257 .extent_start = pfns_in,
2258 .domid = DOMID_SELF
2259 },
2260 .out = {
2261 .nr_extents = extents_out,
2262 .extent_order = order_out,
2263 .extent_start = mfns_out,
2264 .address_bits = address_bits,
2265 .domid = DOMID_SELF
2266 }
2267 };
2268
2269 BUG_ON(extents_in << order_in != extents_out << order_out);
2270
2271 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2272 success = (exchange.nr_exchanged == extents_in);
2273
2274 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2275 BUG_ON(success && (rc != 0));
2276
2277 return success;
2278}
2279
2280int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
2281 unsigned int address_bits)
2282{
2283 unsigned long *in_frames = discontig_frames, out_frame;
2284 unsigned long flags;
2285 int success;
2286
2287 /*
2288 * Currently an auto-translated guest will not perform I/O, nor will
2289 * it require PAE page directories below 4GB. Therefore any calls to
2290 * this function are redundant and can be ignored.
2291 */
2292
2293 if (xen_feature(XENFEAT_auto_translated_physmap))
2294 return 0;
2295
2296 if (unlikely(order > MAX_CONTIG_ORDER))
2297 return -ENOMEM;
2298
2299 memset((void *) vstart, 0, PAGE_SIZE << order);
2300
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002301 spin_lock_irqsave(&xen_reservation_lock, flags);
2302
2303 /* 1. Zap current PTEs, remembering MFNs. */
2304 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2305
2306 /* 2. Get a new contiguous memory extent. */
2307 out_frame = virt_to_pfn(vstart);
2308 success = xen_exchange_memory(1UL << order, 0, in_frames,
2309 1, order, &out_frame,
2310 address_bits);
2311
2312 /* 3. Map the new extent in place of old pages. */
2313 if (success)
2314 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2315 else
2316 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2317
2318 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2319
2320 return success ? 0 : -ENOMEM;
2321}
2322EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2323
2324void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
2325{
2326 unsigned long *out_frames = discontig_frames, in_frame;
2327 unsigned long flags;
2328 int success;
2329
2330 if (xen_feature(XENFEAT_auto_translated_physmap))
2331 return;
2332
2333 if (unlikely(order > MAX_CONTIG_ORDER))
2334 return;
2335
2336 memset((void *) vstart, 0, PAGE_SIZE << order);
2337
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002338 spin_lock_irqsave(&xen_reservation_lock, flags);
2339
2340 /* 1. Find start MFN of contiguous extent. */
2341 in_frame = virt_to_mfn(vstart);
2342
2343 /* 2. Zap current PTEs. */
2344 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2345
2346 /* 3. Do the exchange for non-contiguous MFNs. */
2347 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2348 0, out_frames, 0);
2349
2350 /* 4. Map new pages in place of old pages. */
2351 if (success)
2352 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2353 else
2354 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2355
2356 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2357}
2358EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
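/*
 * Editor's illustrative sketch, not part of the original file: one way a
 * caller might use the two exported helpers above to obtain a buffer that is
 * contiguous in machine memory (e.g. for a device without scatter-gather),
 * under the assumption that 32 address bits are enough for the device.  The
 * helper names are hypothetical; error handling is minimal.
 */
static void *example_alloc_machine_contig(unsigned int order)
{
	unsigned long vstart = __get_free_pages(GFP_KERNEL, order);

	if (!vstart)
		return NULL;

	/* exchange the backing frames for one machine-contiguous extent */
	if (xen_create_contiguous_region(vstart, order, 32)) {
		free_pages(vstart, order);
		return NULL;
	}
	return (void *)vstart;
}

static void example_free_machine_contig(void *buf, unsigned int order)
{
	/* give the contiguous extent back, then free the pages normally */
	xen_destroy_contiguous_region((unsigned long)buf, order);
	free_pages((unsigned long)buf, order);
}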
2359
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002360#ifdef CONFIG_XEN_PVHVM
Stefano Stabellini59151002010-06-17 14:22:52 +01002361static void xen_hvm_exit_mmap(struct mm_struct *mm)
2362{
2363 struct xen_hvm_pagetable_dying a;
2364 int rc;
2365
2366 a.domid = DOMID_SELF;
2367 a.gpa = __pa(mm->pgd);
2368 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2369 WARN_ON_ONCE(rc < 0);
2370}
2371
2372static int is_pagetable_dying_supported(void)
2373{
2374 struct xen_hvm_pagetable_dying a;
2375 int rc = 0;
2376
2377 a.domid = DOMID_SELF;
2378 a.gpa = 0x00;
2379 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2380 if (rc < 0) {
2381 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2382 return 0;
2383 }
2384 return 1;
2385}
2386
2387void __init xen_hvm_init_mmu_ops(void)
2388{
2389 if (is_pagetable_dying_supported())
2390 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
2391}
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002392#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002393
Ian Campbellde1ef202009-05-21 10:09:46 +01002394#define REMAP_BATCH_SIZE 16
2395
2396struct remap_data {
2397 unsigned long mfn;
2398 pgprot_t prot;
2399 struct mmu_update *mmu_update;
2400};
2401
2402static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2403 unsigned long addr, void *data)
2404{
2405 struct remap_data *rmd = data;
2406 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
2407
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -08002408 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
Ian Campbellde1ef202009-05-21 10:09:46 +01002409 rmd->mmu_update->val = pte_val_ma(pte);
2410 rmd->mmu_update++;
2411
2412 return 0;
2413}
2414
2415int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2416 unsigned long addr,
2417 unsigned long mfn, int nr,
2418 pgprot_t prot, unsigned domid)
2419{
2420 struct remap_data rmd;
2421 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2422 int batch;
2423 unsigned long range;
2424 int err = 0;
2425
2426 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
2427
Stefano Stabellinie060e7af2010-11-11 12:37:43 -08002428 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
2429 (VM_PFNMAP | VM_RESERVED | VM_IO)));
Ian Campbellde1ef202009-05-21 10:09:46 +01002430
2431 rmd.mfn = mfn;
2432 rmd.prot = prot;
2433
2434 while (nr) {
2435 batch = min(REMAP_BATCH_SIZE, nr);
2436 range = (unsigned long)batch << PAGE_SHIFT;
2437
2438 rmd.mmu_update = mmu_update;
2439 err = apply_to_page_range(vma->vm_mm, addr, range,
2440 remap_area_mfn_pte_fn, &rmd);
2441 if (err)
2442 goto out;
2443
2444 err = -EFAULT;
2445 if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
2446 goto out;
2447
2448 nr -= batch;
2449 addr += range;
2450 }
2451
2452 err = 0;
2453out:
2454
2455 flush_tlb_all();
2456
2457 return err;
2458}
2459EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
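/*
 * Editor's illustrative sketch, not part of the original file: a minimal,
 * hypothetical caller of xen_remap_domain_mfn_range(), in the style of a
 * privileged ->mmap handler that maps 'nr' foreign frames starting at
 * 'first_mfn' from domain 'domid' into a userspace vma.  The vm_flags set
 * here are exactly the ones the BUG_ON() above insists on.
 */
static int example_remap_foreign(struct vm_area_struct *vma,
				 unsigned long first_mfn, int nr,
				 unsigned domid)
{
	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

	return xen_remap_domain_mfn_range(vma, vma->vm_start,
					  first_mfn, nr,
					  vma->vm_page_prot, domid);
}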