/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion. In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable. When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest. This prevents uncontrolled
 * guest updates to the pagetable. Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow. The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use. This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>

#include <trace/events/xen.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
#include <asm/smp.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

#ifdef CONFIG_X86_32
/*
 * Identity map, in addition to plain kernel map. This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
#endif
#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3. This may not be the current effective cr3, because
 * its update may still be lazily deferred. However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early). If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		/* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	/* actual vcpu cr3 */


/*
 * Just beyond the highest usermode address. STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

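/* Translate an arbitrary kernel virtual address to the mfn of the frame backing it. */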
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

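/*
 * Add an mmu_update to the pending multicall batch.  If the previous
 * multicall in the batch was also an mmu_update, it is extended (its
 * count is bumped); otherwise a fresh MULTI_mmu_update entry is started.
 */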
static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}

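/*
 * Update a pmd entry via a batched mmu_update hypercall.  xen_set_pmd()
 * below uses this when the containing page is pinned and so can't be
 * written directly.
 */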
static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

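/*
 * Try to queue a pte update as part of the current lazy-MMU multicall
 * batch.  Returns false if we're not in lazy MMU mode, in which case the
 * caller must update the pte some other way.
 */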
static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval)) {
		/*
		 * Could call native_set_pte() here and trap and
		 * emulate the PTE write but with 32-bit guests this
		 * needs two traps (one for each of the two 32-bit
		 * words in the PTE) so do one hypercall directly
		 * instead.
		 */
		struct mmu_update u;

		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(pteval);
		HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
	}
}

static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is. We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		unsigned long pfn = mfn_to_pfn(mfn);

		pteval_t flags = val & PTE_FLAGS_MASK;
		if (unlikely(pfn == ~0))
			val = flags & ~_PAGE_PRESENT;
		else
			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = get_phys_to_machine(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte. Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else {
			/*
			 * Paramount to do this test _after_ the
			 * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY &
			 * IDENTITY_FRAME_BIT resolves to true.
			 */
			mfn &= ~FOREIGN_FRAME_BIT;
			if (mfn & IDENTITY_FRAME_BIT) {
				mfn &= ~IDENTITY_FRAME_BIT;
				flags |= _PAGE_IOMAP;
			}
		}
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		/* We assume the pte frame number is a MFN, so
		   just use it as-is. */
		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

__visible pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;
#if 0
	/* If this is a WC pte, convert back from Xen WC to Linux WC */
	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
		WARN_ON(!pat_enabled);
		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
	}
#endif
	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
		return pteval;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

__visible pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

/*
 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
 * are reserved for now, to correspond to the Intel-reserved PAT
 * types.
 *
 * We expect Linux's PAT set as follows:
 *
 * Idx  PTE flags        Linux    Xen    Default
 * 0                     WB       WB     WB
 * 1            PWT      WC       WT     WT
 * 2        PCD          UC-      UC-    UC-
 * 3        PCD PWT      UC       UC     UC
 * 4    PAT              WB       WC     WB
 * 5    PAT     PWT      WC       WP     WT
 * 6    PAT PCD          UC-      rsv    UC-
 * 7    PAT PCD PWT      UC       rsv    UC
 */

void xen_set_pat(u64 pat)
{
	/* We expect Linux to use a PAT setting of
	 * UC UC- WC WB (ignoring the PAT flag) */
	WARN_ON(pat != 0x0007010600070106ull);
}

__visible pte_t xen_make_pte(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);
#if 0
	/* If Linux is trying to set a WC pte, then map to the Xen WC.
	 * If _PAGE_PAT is set, then it probably means it is really
	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
	 * things work out OK...
	 *
	 * (We should never see kernel mappings with _PAGE_PSE set,
	 * but we could see hugetlbfs mappings, I think.).
	 */
	if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
	}
#endif
	/*
	 * Unprivileged domains are allowed to do IOMAPpings for
	 * PCI passthrough, but not map ISA space. The ISA
	 * mappings are just dummy local mappings to keep other
	 * parts of the kernel happy.
	 */
	if (unlikely(pte & _PAGE_IOMAP) &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
	} else {
		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
	}

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

__visible pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

__visible pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
	trace_xen_mmu_set_pud(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	trace_xen_mmu_set_pte_atomic(ptep, pte);
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	trace_xen_mmu_pte_clear(mm, addr, ptep);
	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
		native_pte_clear(mm, addr, ptep);
}

static void xen_pmd_clear(pmd_t *pmdp)
{
	trace_xen_mmu_pmd_clear(pmdp);
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

__visible pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
__visible pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

__visible pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

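/*
 * On 64-bit, a kernel pgd may have a companion userspace pgd whose base
 * is stashed in the pgd page's page->private.  Return a pointer to the
 * matching entry in that user pgd, or NULL if there is none or the entry
 * lies at or above USER_LIMIT.
 */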
static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure. This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	trace_xen_mmu_set_pgd(ptr, user_ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole and so is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTE_PTLOCKS
	ptl = ptlock_ptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

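/* Queue an mmuext op (cmd is one of the MMUEXT_{,UN}PIN_*_TABLE values)
   against the frame backing pfn. */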
static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);

	xen_extend_mmuext_op(&op);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_pin(mm, pgd);

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_unpin(mm, pgd);

	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
static void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

static void xen_post_allocator_init(void);

Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001181#ifdef CONFIG_X86_64
1182static void __init xen_cleanhighmap(unsigned long vaddr,
1183 unsigned long vaddr_end)
1184{
1185 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1186 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1187
1188 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1189 * We include the PMD passed in on _both_ boundaries. */
1190 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
1191 pmd++, vaddr += PMD_SIZE) {
1192 if (pmd_none(*pmd))
1193 continue;
1194 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1195 set_pmd(pmd, __pmd(0));
1196 }
1197 /* In case we did something silly, we should crash in this function
1198	 * instead of somewhere later, where it would be confusing. */
1199 xen_mc_flush();
1200}
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001201static void __init xen_pagetable_p2m_copy(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001202{
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001203 unsigned long size;
1204 unsigned long addr;
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001205 unsigned long new_mfn_list;
1206
1207 if (xen_feature(XENFEAT_auto_translated_physmap))
1208 return;
1209
1210 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1211
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001212 new_mfn_list = xen_revector_p2m_tree();
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001213 /* No memory or already called. */
1214 if (!new_mfn_list || new_mfn_list == xen_start_info->mfn_list)
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001215 return;
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001216
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001217	/* Fill the old array, via its __ka address, with INVALID_P2M_ENTRY. */
1218 memset((void *)xen_start_info->mfn_list, 0xff, size);
1219
1220 /* We should be in __ka space. */
1221 BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
1222 addr = xen_start_info->mfn_list;
1223	/* We round up to the PMD, which means that if anybody at this stage is
1224 * using the __ka address of xen_start_info or xen_start_info->shared_info
1225	 * they are going to crash. Fortunately we have already revectored
1226 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
1227 size = roundup(size, PMD_SIZE);
1228 xen_cleanhighmap(addr, addr + size);
1229
1230 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1231 memblock_free(__pa(xen_start_info->mfn_list), size);
1232 /* And revector! Bye bye old array */
1233 xen_start_info->mfn_list = new_mfn_list;
1234
Konrad Rzeszutek Wilk3aca7fb2012-08-14 14:34:00 -04001235 /* At this stage, cleanup_highmap has already cleaned __ka space
1236 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1237 * the ramdisk). We continue on, erasing PMD entries that point to page
1238 * tables - do note that they are accessible at this stage via __va.
1239 * For good measure we also round up to the PMD - which means that if
1240	 * anybody is using a __ka address for the initial boot-stack - and tries
1241 * to use it - they are going to crash. The xen_start_info has been
1242 * taken care of already in xen_setup_kernel_pagetable. */
1243 addr = xen_start_info->pt_base;
1244 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1245
1246 xen_cleanhighmap(addr, addr + size);
1247 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1248#ifdef DEBUG
1249	/* This is superfluous and not necessary, but you know what,
1250	 * let's do it. The MODULES_VADDR -> MODULES_END should be clear of
1251 * anything at this stage. */
1252 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1253#endif
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001254}
1255#endif
1256
1257static void __init xen_pagetable_init(void)
1258{
1259 paging_init();
1260 xen_setup_shared_info();
1261#ifdef CONFIG_X86_64
1262 xen_pagetable_p2m_copy();
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001263#endif
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001264 xen_post_allocator_init();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001265}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001266static void xen_write_cr2(unsigned long cr2)
1267{
Alex Shi2113f462012-01-13 23:53:35 +08001268 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001269}
1270
1271static unsigned long xen_read_cr2(void)
1272{
Alex Shi2113f462012-01-13 23:53:35 +08001273 return this_cpu_read(xen_vcpu)->arch.cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001274}
1275
1276unsigned long xen_read_cr2_direct(void)
1277{
Alex Shi2113f462012-01-13 23:53:35 +08001278 return this_cpu_read(xen_vcpu_info.arch.cr2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001279}
1280
Konrad Rzeszutek Wilk95a7d762012-10-31 12:38:31 -04001281void xen_flush_tlb_all(void)
1282{
1283 struct mmuext_op *op;
1284 struct multicall_space mcs;
1285
1286 trace_xen_mmu_flush_tlb_all(0);
1287
1288 preempt_disable();
1289
1290 mcs = xen_mc_entry(sizeof(*op));
1291
1292 op = mcs.args;
1293 op->cmd = MMUEXT_TLB_FLUSH_ALL;
1294 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1295
1296 xen_mc_issue(PARAVIRT_LAZY_MMU);
1297
1298 preempt_enable();
1299}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001300static void xen_flush_tlb(void)
1301{
1302 struct mmuext_op *op;
1303 struct multicall_space mcs;
1304
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001305 trace_xen_mmu_flush_tlb(0);
1306
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001307 preempt_disable();
1308
1309 mcs = xen_mc_entry(sizeof(*op));
1310
1311 op = mcs.args;
1312 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1313 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1314
1315 xen_mc_issue(PARAVIRT_LAZY_MMU);
1316
1317 preempt_enable();
1318}
1319
1320static void xen_flush_tlb_single(unsigned long addr)
1321{
1322 struct mmuext_op *op;
1323 struct multicall_space mcs;
1324
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001325 trace_xen_mmu_flush_tlb_single(addr);
1326
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001327 preempt_disable();
1328
1329 mcs = xen_mc_entry(sizeof(*op));
1330 op = mcs.args;
1331 op->cmd = MMUEXT_INVLPG_LOCAL;
1332 op->arg1.linear_addr = addr & PAGE_MASK;
1333 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1334
1335 xen_mc_issue(PARAVIRT_LAZY_MMU);
1336
1337 preempt_enable();
1338}
1339
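/*
 * Illustrative sketch, not wired up anywhere: the flush helpers above
 * each queue a single MMUEXT op, but the same multicall machinery can
 * batch several ops between xen_mc_batch() and xen_mc_issue() so that
 * both invalidations below can reach Xen in one hypercall.  The
 * function name and its two addresses are hypothetical.
 */
static void __maybe_unused xen_flush_tlb_pair_sketch(unsigned long addr1,
						     unsigned long addr2)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();
	xen_mc_batch();

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr1 & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr2 & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	/* At most one hypercall is issued for both entries. */
	xen_mc_issue(PARAVIRT_LAZY_MMU);
	preempt_enable();
}
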
1340static void xen_flush_tlb_others(const struct cpumask *cpus,
Alex Shie7b52ff2012-06-28 09:02:17 +08001341 struct mm_struct *mm, unsigned long start,
1342 unsigned long end)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001343{
1344 struct {
1345 struct mmuext_op op;
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001346#ifdef CONFIG_SMP
Andrew Jones900cba82009-12-18 10:31:31 +01001347 DECLARE_BITMAP(mask, num_processors);
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001348#else
1349 DECLARE_BITMAP(mask, NR_CPUS);
1350#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001351 } *args;
1352 struct multicall_space mcs;
1353
Alex Shie7b52ff2012-06-28 09:02:17 +08001354 trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001355
Jeremy Fitzhardingee3f8a742009-03-04 17:36:57 -08001356 if (cpumask_empty(cpus))
1357 return; /* nothing to do */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001358
1359 mcs = xen_mc_entry(sizeof(*args));
1360 args = mcs.args;
1361 args->op.arg2.vcpumask = to_cpumask(args->mask);
1362
1363	/* Remove us, and any offline CPUs. */
1364 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1365 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001366
Alex Shie7b52ff2012-06-28 09:02:17 +08001367 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
Alex Shice7184b2012-08-24 08:55:13 +00001368 if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001369 args->op.cmd = MMUEXT_INVLPG_MULTI;
Alex Shie7b52ff2012-06-28 09:02:17 +08001370 args->op.arg1.linear_addr = start;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001371 }
1372
1373 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1374
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001375 xen_mc_issue(PARAVIRT_LAZY_MMU);
1376}
1377
1378static unsigned long xen_read_cr3(void)
1379{
Alex Shi2113f462012-01-13 23:53:35 +08001380 return this_cpu_read(xen_cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001381}
1382
1383static void set_current_cr3(void *v)
1384{
Alex Shi2113f462012-01-13 23:53:35 +08001385 this_cpu_write(xen_current_cr3, (unsigned long)v);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001386}
1387
1388static void __xen_write_cr3(bool kernel, unsigned long cr3)
1389{
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001390 struct mmuext_op op;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001391 unsigned long mfn;
1392
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001393 trace_xen_mmu_write_cr3(kernel, cr3);
1394
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001395 if (cr3)
1396 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1397 else
1398 mfn = 0;
1399
1400 WARN_ON(mfn == 0 && kernel);
1401
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001402 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1403 op.arg1.mfn = mfn;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001404
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001405 xen_extend_mmuext_op(&op);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001406
1407 if (kernel) {
Alex Shi2113f462012-01-13 23:53:35 +08001408 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001409
1410 /* Update xen_current_cr3 once the batch has actually
1411 been submitted. */
1412 xen_mc_callback(set_current_cr3, (void *)cr3);
1413 }
1414}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001415static void xen_write_cr3(unsigned long cr3)
1416{
1417 BUG_ON(preemptible());
1418
1419 xen_mc_batch(); /* disables interrupts */
1420
1421	/* Update while interrupts are disabled, so it's atomic with
1422 respect to ipis */
Alex Shi2113f462012-01-13 23:53:35 +08001423 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001424
1425 __xen_write_cr3(true, cr3);
1426
1427#ifdef CONFIG_X86_64
1428 {
1429 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1430 if (user_pgd)
1431 __xen_write_cr3(false, __pa(user_pgd));
1432 else
1433 __xen_write_cr3(false, 0);
1434 }
1435#endif
1436
1437 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1438}
1439
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001440#ifdef CONFIG_X86_64
1441/*
1442 * At the start of the day - when Xen launches a guest, it has already
1443 * built pagetables for the guest. We diligently look over them
1444	 * in xen_setup_kernel_pagetable and graft them as appropriate into the
1445 * init_level4_pgt and its friends. Then when we are happy we load
1446 * the new init_level4_pgt - and continue on.
1447 *
1448 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1449 * up the rest of the pagetables. When it has completed it loads the cr3.
1450 * N.B. that baremetal would start at 'start_kernel' (and the early
1451 * #PF handler would create bootstrap pagetables) - so we are running
1452 * with the same assumptions as what to do when write_cr3 is executed
1453	 * with the same assumptions about what to do when write_cr3 is executed
1454 *
1455 * Since there are no user-page tables at all, we have two variants
1456 * of xen_write_cr3 - the early bootup (this one), and the late one
1457 * (xen_write_cr3). The reason we have to do that is that in 64-bit
1458 * the Linux kernel and user-space are both in ring 3 while the
1459 * hypervisor is in ring 0.
1460 */
1461static void __init xen_write_cr3_init(unsigned long cr3)
1462{
1463 BUG_ON(preemptible());
1464
1465 xen_mc_batch(); /* disables interrupts */
1466
1467	/* Update while interrupts are disabled, so it's atomic with
1468 respect to ipis */
1469 this_cpu_write(xen_cr3, cr3);
1470
1471 __xen_write_cr3(true, cr3);
1472
1473 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001474}
1475#endif
1476
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001477static int xen_pgd_alloc(struct mm_struct *mm)
1478{
1479 pgd_t *pgd = mm->pgd;
1480 int ret = 0;
1481
1482 BUG_ON(PagePinned(virt_to_page(pgd)));
1483
1484#ifdef CONFIG_X86_64
1485 {
1486 struct page *page = virt_to_page(pgd);
1487 pgd_t *user_pgd;
1488
1489 BUG_ON(page->private != 0);
1490
1491 ret = -ENOMEM;
1492
1493 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1494 page->private = (unsigned long)user_pgd;
1495
1496 if (user_pgd != NULL) {
1497 user_pgd[pgd_index(VSYSCALL_START)] =
1498 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1499 ret = 0;
1500 }
1501
1502 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1503 }
1504#endif
1505
1506 return ret;
1507}
1508
1509static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1510{
1511#ifdef CONFIG_X86_64
1512 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1513
1514 if (user_pgd)
1515 free_page((unsigned long)user_pgd);
1516#endif
1517}
1518
Stefano Stabelliniee176452011-04-19 14:47:31 +01001519#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001520static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001521{
1522 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1523 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1524 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1525 pte_val_ma(pte));
Stefano Stabelliniee176452011-04-19 14:47:31 +01001526
1527 return pte;
1528}
1529#else /* CONFIG_X86_64 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001530static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Stefano Stabelliniee176452011-04-19 14:47:31 +01001531{
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001532 return pte;
1533}
Stefano Stabelliniee176452011-04-19 14:47:31 +01001534#endif /* CONFIG_X86_64 */
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001535
David Vrabeld095d432012-07-09 11:39:05 +01001536/*
1537 * Init-time set_pte while constructing initial pagetables, which
1538 * doesn't allow RO page table pages to be remapped RW.
1539 *
David Vrabel66a27dd2012-07-09 11:39:06 +01001540 * If there is no MFN for this PFN then this page is initially
1541 * ballooned out so clear the PTE (as in decrease_reservation() in
1542 * drivers/xen/balloon.c).
1543 *
David Vrabeld095d432012-07-09 11:39:05 +01001544 * Many of these PTE updates are done on unpinned and writable pages
1545 * and doing a hypercall for these is unnecessary and expensive. At
1546 * this point it is not possible to tell if a page is pinned or not,
1547 * so always write the PTE directly and rely on Xen trapping and
1548 * emulating any updates as necessary.
1549 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001550static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001551{
David Vrabel66a27dd2012-07-09 11:39:06 +01001552 if (pte_mfn(pte) != INVALID_P2M_ENTRY)
1553 pte = mask_rw_pte(ptep, pte);
1554 else
1555 pte = __pte_ma(0);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001556
David Vrabeld095d432012-07-09 11:39:05 +01001557 native_set_pte(ptep, pte);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001558}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001559
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001560static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1561{
1562 struct mmuext_op op;
1563 op.cmd = cmd;
1564 op.arg1.mfn = pfn_to_mfn(pfn);
1565 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1566 BUG();
1567}
1568
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001569/* Early in boot, while setting up the initial pagetable, assume
1570 everything is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001571static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001572{
1573#ifdef CONFIG_FLATMEM
1574 BUG_ON(mem_map); /* should only be used early */
1575#endif
1576 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001577 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1578}
1579
1580/* Used for pmd and pud */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001581static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001582{
1583#ifdef CONFIG_FLATMEM
1584 BUG_ON(mem_map); /* should only be used early */
1585#endif
1586 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001587}
1588
1589/* Early release_pte assumes that all pts are pinned, since there's
1590 only init_mm and anything attached to that is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001591static void __init xen_release_pte_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001592{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001593 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001594 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1595}
1596
Daniel Kiper3f5089532011-05-12 17:19:53 -04001597static void __init xen_release_pmd_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001598{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001599 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001600}
1601
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001602static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1603{
1604 struct multicall_space mcs;
1605 struct mmuext_op *op;
1606
1607 mcs = __xen_mc_entry(sizeof(*op));
1608 op = mcs.args;
1609 op->cmd = cmd;
1610 op->arg1.mfn = pfn_to_mfn(pfn);
1611
1612 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1613}
1614
1615static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1616{
1617 struct multicall_space mcs;
1618 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1619
1620 mcs = __xen_mc_entry(0);
1621 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1622 pfn_pte(pfn, prot), 0);
1623}
1624
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001625/* This needs to make sure the new pte page is pinned iff its being
1626 attached to a pinned pagetable. */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001627static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1628 unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001629{
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001630 bool pinned = PagePinned(virt_to_page(mm->pgd));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001631
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001632 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001633
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001634 if (pinned) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001635 struct page *page = pfn_to_page(pfn);
1636
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001637 SetPagePinned(page);
1638
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001639 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001640 xen_mc_batch();
1641
1642 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1643
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -08001644 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001645 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1646
1647 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001648 } else {
1649 /* make sure there are no stray mappings of
1650 this page */
1651 kmap_flush_unused();
1652 }
1653 }
1654}
1655
1656static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1657{
1658 xen_alloc_ptpage(mm, pfn, PT_PTE);
1659}
1660
1661static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1662{
1663 xen_alloc_ptpage(mm, pfn, PT_PMD);
1664}
1665
1666/* This should never happen until we're OK to use struct page */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001667static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001668{
1669 struct page *page = pfn_to_page(pfn);
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001670 bool pinned = PagePinned(page);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001671
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001672 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1673
1674 if (pinned) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001675 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001676 xen_mc_batch();
1677
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -08001678 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001679 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1680
1681 __set_pfn_prot(pfn, PAGE_KERNEL);
1682
1683 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001684 }
1685 ClearPagePinned(page);
1686 }
1687}
1688
1689static void xen_release_pte(unsigned long pfn)
1690{
1691 xen_release_ptpage(pfn, PT_PTE);
1692}
1693
1694static void xen_release_pmd(unsigned long pfn)
1695{
1696 xen_release_ptpage(pfn, PT_PMD);
1697}
1698
1699#if PAGETABLE_LEVELS == 4
1700static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1701{
1702 xen_alloc_ptpage(mm, pfn, PT_PUD);
1703}
1704
1705static void xen_release_pud(unsigned long pfn)
1706{
1707 xen_release_ptpage(pfn, PT_PUD);
1708}
1709#endif
1710
1711void __init xen_reserve_top(void)
1712{
1713#ifdef CONFIG_X86_32
1714 unsigned long top = HYPERVISOR_VIRT_START;
1715 struct xen_platform_parameters pp;
1716
1717 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1718 top = pp.virt_start;
1719
1720 reserve_top_address(-top);
1721#endif /* CONFIG_X86_32 */
1722}
1723
1724/*
1725 * Like __va(), but returns the address in the kernel mapping (which is
1726 * all we have until the physical memory mapping has been set up).
1727 */
1728static void *__ka(phys_addr_t paddr)
1729{
1730#ifdef CONFIG_X86_64
1731 return (void *)(paddr + __START_KERNEL_map);
1732#else
1733 return __va(paddr);
1734#endif
1735}
1736
1737/* Convert a machine address to physical address */
1738static unsigned long m2p(phys_addr_t maddr)
1739{
1740 phys_addr_t paddr;
1741
1742 maddr &= PTE_PFN_MASK;
1743 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1744
1745 return paddr;
1746}
1747
1748/* Convert a machine address to kernel virtual */
1749static void *m2v(phys_addr_t maddr)
1750{
1751 return __ka(m2p(maddr));
1752}
1753
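/*
 * Minimal sketch of how the helpers above compose (never called): this
 * mirrors the "l3 = m2v(pgd[...].pgd)" pattern used below in
 * xen_setup_kernel_pagetable() to walk one level of the Xen-provided,
 * machine-address-based pagetable.  The function name is hypothetical.
 */
static pud_t * __maybe_unused xen_walk_one_level_sketch(pgd_t *pgd,
							unsigned long vaddr)
{
	/* The pgd entry holds an mfn; m2v() gives us its __ka mapping. */
	return (pud_t *)m2v(pgd[pgd_index(vaddr)].pgd);
}
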
Juan Quintela4ec53872010-09-02 15:45:43 +01001754/* Set the page permissions on an identity-mapped page */
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001755static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001756{
1757 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1758 pte_t pte = pfn_pte(pfn, prot);
1759
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001760 /* For PVH no need to set R/O or R/W to pin them or unpin them. */
1761 if (xen_feature(XENFEAT_auto_translated_physmap))
1762 return;
1763
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001764 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001765 BUG();
1766}
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001767static void set_page_prot(void *addr, pgprot_t prot)
1768{
1769 return set_page_prot_flags(addr, prot, UVMF_NONE);
1770}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001771#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001772static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001773{
1774 unsigned pmdidx, pteidx;
1775 unsigned ident_pte;
1776 unsigned long pfn;
1777
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001778 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1779 PAGE_SIZE);
1780
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001781 ident_pte = 0;
1782 pfn = 0;
1783 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1784 pte_t *pte_page;
1785
1786 /* Reuse or allocate a page of ptes */
1787 if (pmd_present(pmd[pmdidx]))
1788 pte_page = m2v(pmd[pmdidx].pmd);
1789 else {
1790 /* Check for free pte pages */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001791 if (ident_pte == LEVEL1_IDENT_ENTRIES)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001792 break;
1793
1794 pte_page = &level1_ident_pgt[ident_pte];
1795 ident_pte += PTRS_PER_PTE;
1796
1797 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1798 }
1799
1800 /* Install mappings */
1801 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1802 pte_t pte;
1803
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001804#ifdef CONFIG_X86_32
1805 if (pfn > max_pfn_mapped)
1806 max_pfn_mapped = pfn;
1807#endif
1808
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001809 if (!pte_none(pte_page[pteidx]))
1810 continue;
1811
1812 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1813 pte_page[pteidx] = pte;
1814 }
1815 }
1816
1817 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1818 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1819
1820 set_page_prot(pmd, PAGE_KERNEL_RO);
1821}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001822#endif
Ian Campbell7e775062010-09-30 12:37:26 +01001823void __init xen_setup_machphys_mapping(void)
1824{
1825 struct xen_machphys_mapping mapping;
Ian Campbell7e775062010-09-30 12:37:26 +01001826
1827 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1828 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
Jan Beulichccbcdf72011-08-16 15:07:41 +01001829 machine_to_phys_nr = mapping.max_mfn + 1;
Ian Campbell7e775062010-09-30 12:37:26 +01001830 } else {
Jan Beulichccbcdf72011-08-16 15:07:41 +01001831 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
Ian Campbell7e775062010-09-30 12:37:26 +01001832 }
Jan Beulichccbcdf72011-08-16 15:07:41 +01001833#ifdef CONFIG_X86_32
Jan Beulich61cca2f2011-09-15 08:52:40 +01001834 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1835 < machine_to_phys_mapping);
Jan Beulichccbcdf72011-08-16 15:07:41 +01001836#endif
Ian Campbell7e775062010-09-30 12:37:26 +01001837}
1838
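/*
 * Illustrative sketch (unused): once the hypervisor's M2P table has
 * been located above, a machine-to-physical lookup is conceptually just
 * a bounds-checked array read.  The real conversions go through
 * mfn_to_pfn(), which layers further checks on top of this; the
 * function name and the ~0UL error value here are hypothetical.
 */
static unsigned long __maybe_unused xen_m2p_lookup_sketch(unsigned long mfn)
{
	if (mfn >= machine_to_phys_nr)
		return ~0UL;

	return machine_to_phys_mapping[mfn];
}
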
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001839#ifdef CONFIG_X86_64
1840static void convert_pfn_mfn(void *v)
1841{
1842 pte_t *pte = v;
1843 int i;
1844
1845 /* All levels are converted the same way, so just treat them
1846 as ptes. */
1847 for (i = 0; i < PTRS_PER_PTE; i++)
1848 pte[i] = xen_make_pte(pte[i].pte);
1849}
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001850static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1851 unsigned long addr)
1852{
1853 if (*pt_base == PFN_DOWN(__pa(addr))) {
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001854 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001855 clear_page((void *)addr);
1856 (*pt_base)++;
1857 }
1858 if (*pt_end == PFN_DOWN(__pa(addr))) {
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001859 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001860 clear_page((void *)addr);
1861 (*pt_end)--;
1862 }
1863}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001864/*
Lucas De Marchi0d2eb442011-03-17 16:24:16 -03001865 * Set up the initial kernel pagetable.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001866 *
1867 * We can construct this by grafting the Xen provided pagetable into
1868 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1869 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1870 * means that only the kernel has a physical mapping to start with -
1871 * but that's enough to get __va working. We need to fill in the rest
1872 * of the physical mapping once some sort of allocator has been set
1873 * up.
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001874 * NOTE: for PVH, the page tables are native.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001875 */
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04001876void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001877{
1878 pud_t *l3;
1879 pmd_t *l2;
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001880 unsigned long addr[3];
1881 unsigned long pt_base, pt_end;
1882 unsigned i;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001883
Stefano Stabellini14988a42011-02-18 11:32:40 +00001884 /* max_pfn_mapped is the last pfn mapped in the initial memory
1885	 * mappings. Considering that on Xen, after the kernel mappings, we
1886 * have the mappings of some pages that don't exist in pfn space, we
1887 * set max_pfn_mapped to the last real pfn mapped. */
1888 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1889
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001890 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1891 pt_end = pt_base + xen_start_info->nr_pt_frames;
1892
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001893 /* Zap identity mapping */
1894 init_level4_pgt[0] = __pgd(0);
1895
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001896 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1897 /* Pre-constructed entries are in pfn, so convert to mfn */
1898 /* L4[272] -> level3_ident_pgt
1899 * L4[511] -> level3_kernel_pgt */
1900 convert_pfn_mfn(init_level4_pgt);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001901
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001902 /* L3_i[0] -> level2_ident_pgt */
1903 convert_pfn_mfn(level3_ident_pgt);
1904 /* L3_k[510] -> level2_kernel_pgt
1905 * L3_i[511] -> level2_fixmap_pgt */
1906 convert_pfn_mfn(level3_kernel_pgt);
1907 }
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001908 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001909 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1910 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1911
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001912 addr[0] = (unsigned long)pgd;
1913 addr[1] = (unsigned long)l3;
1914 addr[2] = (unsigned long)l2;
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001915	/* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
1916 * Both L4[272][0] and L4[511][511] have entries that point to the same
1917	 * L2 (PMD) tables. This means that if you modify it in __va space
1918	 * it will also be modified in the __ka space! (But if you just
1919 * modify the PMD table to point to other PTE's or none, then you
1920 * are OK - which is what cleanup_highmap does) */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001921 copy_page(level2_ident_pgt, l2);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001922 /* Graft it onto L4[511][511] */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001923 copy_page(level2_kernel_pgt, l2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001924
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001925 /* Get [511][510] and graft that in level2_fixmap_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001926 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1927 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001928 copy_page(level2_fixmap_pgt, l2);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001929 /* Note that we don't do anything with level1_fixmap_pgt which
1930 * we don't need. */
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001931 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1932 /* Make pagetable pieces RO */
1933 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1934 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1935 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1936 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1937 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1938 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1939 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001940
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001941 /* Pin down new L4 */
1942 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1943 PFN_DOWN(__pa_symbol(init_level4_pgt)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001944
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001945 /* Unpin Xen-provided one */
1946 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001947
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001948 /*
1949 * At this stage there can be no user pgd, and no page
1950 * structure to attach it to, so make sure we just set kernel
1951 * pgd.
1952 */
1953 xen_mc_batch();
1954 __xen_write_cr3(true, __pa(init_level4_pgt));
1955 xen_mc_issue(PARAVIRT_LAZY_CPU);
1956 } else
1957 native_write_cr3(__pa(init_level4_pgt));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001958
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001959	/* We can't easily rip out L3 and L2, as the Xen pagetables are
1960 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
1961 * the initial domain. For guests using the toolstack, they are in:
1962	 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
1963 * rip out the [L4] (pgd), but for guests we shave off three pages.
1964 */
1965 for (i = 0; i < ARRAY_SIZE(addr); i++)
1966 check_pt_base(&pt_base, &pt_end, addr[i]);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001967
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001968	/* Reserve our Xen pagetable, which is now up to three pages smaller */
1969 memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001970 /* Revector the xen_start_info */
1971 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001972}
1973#else /* !CONFIG_X86_64 */
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001974static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1975static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1976
Daniel Kiper3f5089532011-05-12 17:19:53 -04001977static void __init xen_write_cr3_init(unsigned long cr3)
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001978{
1979 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
1980
1981 BUG_ON(read_cr3() != __pa(initial_page_table));
1982 BUG_ON(cr3 != __pa(swapper_pg_dir));
1983
1984 /*
1985 * We are switching to swapper_pg_dir for the first time (from
1986 * initial_page_table) and therefore need to mark that page
1987 * read-only and then pin it.
1988 *
1989 * Xen disallows sharing of kernel PMDs for PAE
1990 * guests. Therefore we must copy the kernel PMD from
1991 * initial_page_table into a new kernel PMD to be used in
1992 * swapper_pg_dir.
1993 */
1994 swapper_kernel_pmd =
1995 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001996 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001997 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
1998 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
1999 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
2000
2001 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2002 xen_write_cr3(cr3);
2003 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
2004
2005 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
2006 PFN_DOWN(__pa(initial_page_table)));
2007 set_page_prot(initial_page_table, PAGE_KERNEL);
2008 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
2009
2010 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2011}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002012
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04002013void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002014{
2015 pmd_t *kernel_pmd;
2016
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002017 initial_kernel_pmd =
2018 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Jeremy Fitzhardingef0991802010-08-26 16:16:28 -07002019
Stefano Stabellinia91d9282011-06-03 09:51:34 +00002020 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
2021 xen_start_info->nr_pt_frames * PAGE_SIZE +
2022 512*1024);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002023
2024 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04002025 copy_page(initial_kernel_pmd, kernel_pmd);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002026
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002027 xen_map_identity_early(initial_kernel_pmd, max_pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002028
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04002029 copy_page(initial_page_table, pgd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002030 initial_page_table[KERNEL_PGD_BOUNDARY] =
2031 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002032
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002033 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2034 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002035 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2036
2037 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2038
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002039 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2040 PFN_DOWN(__pa(initial_page_table)));
2041 xen_write_cr3(__pa(initial_page_table));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002042
Tejun Heo24aa0782011-07-12 11:16:06 +02002043 memblock_reserve(__pa(xen_start_info->pt_base),
Konrad Rzeszutek Wilkdc6821e2012-01-07 21:27:38 -05002044 xen_start_info->nr_pt_frames * PAGE_SIZE);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002045}
2046#endif /* CONFIG_X86_64 */
2047
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002048static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2049
Masami Hiramatsu3b3809a2009-04-09 10:55:33 -07002050static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002051{
2052 pte_t pte;
2053
2054 phys >>= PAGE_SHIFT;
2055
2056 switch (idx) {
2057 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
Kees Cook4eefbe72013-04-10 12:24:22 -07002058 case FIX_RO_IDT:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002059#ifdef CONFIG_X86_32
2060 case FIX_WP_TEST:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002061# ifdef CONFIG_HIGHMEM
2062 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2063# endif
2064#else
2065 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
Andy Lutomirski5d5791a2011-08-03 09:31:52 -04002066 case VVAR_PAGE:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002067#endif
Jeremy Fitzhardinge3ecb1b72009-03-07 23:48:41 -08002068 case FIX_TEXT_POKE0:
2069 case FIX_TEXT_POKE1:
2070 /* All local page mappings */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002071 pte = pfn_pte(phys, prot);
2072 break;
2073
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002074#ifdef CONFIG_X86_LOCAL_APIC
2075 case FIX_APIC_BASE: /* maps dummy local APIC */
2076 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2077 break;
2078#endif
2079
2080#ifdef CONFIG_X86_IO_APIC
2081 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2082 /*
2083 * We just don't map the IO APIC - all access is via
2084 * hypercalls. Keep the address in the pte for reference.
2085 */
Konrad Rzeszutek Wilk27abd142012-04-16 13:53:40 -04002086 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002087 break;
2088#endif
2089
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002090 case FIX_PARAVIRT_BOOTMAP:
2091 /* This is an MFN, but it isn't an IO mapping from the
2092 IO domain */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002093 pte = mfn_pte(phys, prot);
2094 break;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002095
2096 default:
2097 /* By default, set_fixmap is used for hardware mappings */
2098 pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
2099 break;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002100 }
2101
2102 __native_set_fixmap(idx, pte);
2103
2104#ifdef CONFIG_X86_64
2105 /* Replicate changes to map the vsyscall page into the user
2106 pagetable vsyscall mapping. */
Andy Lutomirski5d5791a2011-08-03 09:31:52 -04002107 if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
2108 idx == VVAR_PAGE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002109 unsigned long vaddr = __fix_to_virt(idx);
2110 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2111 }
2112#endif
2113}
2114
Daniel Kiper3f5089532011-05-12 17:19:53 -04002115static void __init xen_post_allocator_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002116{
Mukesh Rathor4e44e442013-12-31 12:41:27 -05002117 if (xen_feature(XENFEAT_auto_translated_physmap))
2118 return;
2119
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002120 pv_mmu_ops.set_pte = xen_set_pte;
2121 pv_mmu_ops.set_pmd = xen_set_pmd;
2122 pv_mmu_ops.set_pud = xen_set_pud;
2123#if PAGETABLE_LEVELS == 4
2124 pv_mmu_ops.set_pgd = xen_set_pgd;
2125#endif
2126
2127 /* This will work as long as patching hasn't happened yet
2128 (which it hasn't) */
2129 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2130 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2131 pv_mmu_ops.release_pte = xen_release_pte;
2132 pv_mmu_ops.release_pmd = xen_release_pmd;
2133#if PAGETABLE_LEVELS == 4
2134 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2135 pv_mmu_ops.release_pud = xen_release_pud;
2136#endif
2137
2138#ifdef CONFIG_X86_64
Konrad Rzeszutek Wilkd3eb2c82013-03-22 10:34:28 -04002139 pv_mmu_ops.write_cr3 = &xen_write_cr3;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002140 SetPagePinned(virt_to_page(level3_user_vsyscall));
2141#endif
2142 xen_mark_init_mm_pinned();
2143}
2144
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002145static void xen_leave_lazy_mmu(void)
2146{
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002147 preempt_disable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002148 xen_mc_flush();
2149 paravirt_leave_lazy_mmu();
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002150 preempt_enable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002151}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002152
Daniel Kiper3f5089532011-05-12 17:19:53 -04002153static const struct pv_mmu_ops xen_mmu_ops __initconst = {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002154 .read_cr2 = xen_read_cr2,
2155 .write_cr2 = xen_write_cr2,
2156
2157 .read_cr3 = xen_read_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002158 .write_cr3 = xen_write_cr3_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002159
2160 .flush_tlb_user = xen_flush_tlb,
2161 .flush_tlb_kernel = xen_flush_tlb,
2162 .flush_tlb_single = xen_flush_tlb_single,
2163 .flush_tlb_others = xen_flush_tlb_others,
2164
2165 .pte_update = paravirt_nop,
2166 .pte_update_defer = paravirt_nop,
2167
2168 .pgd_alloc = xen_pgd_alloc,
2169 .pgd_free = xen_pgd_free,
2170
2171 .alloc_pte = xen_alloc_pte_init,
2172 .release_pte = xen_release_pte_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002173 .alloc_pmd = xen_alloc_pmd_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002174 .release_pmd = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002175
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002176 .set_pte = xen_set_pte_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002177 .set_pte_at = xen_set_pte_at,
2178 .set_pmd = xen_set_pmd_hyper,
2179
2180 .ptep_modify_prot_start = __ptep_modify_prot_start,
2181 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2182
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002183 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2184 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002185
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002186 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2187 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002188
2189#ifdef CONFIG_X86_PAE
2190 .set_pte_atomic = xen_set_pte_atomic,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002191 .pte_clear = xen_pte_clear,
2192 .pmd_clear = xen_pmd_clear,
2193#endif /* CONFIG_X86_PAE */
2194 .set_pud = xen_set_pud_hyper,
2195
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002196 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2197 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002198
2199#if PAGETABLE_LEVELS == 4
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002200 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2201 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002202 .set_pgd = xen_set_pgd_hyper,
2203
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002204 .alloc_pud = xen_alloc_pmd_init,
2205 .release_pud = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002206#endif /* PAGETABLE_LEVELS == 4 */
2207
2208 .activate_mm = xen_activate_mm,
2209 .dup_mmap = xen_dup_mmap,
2210 .exit_mmap = xen_exit_mmap,
2211
2212 .lazy_mode = {
2213 .enter = paravirt_enter_lazy_mmu,
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002214 .leave = xen_leave_lazy_mmu,
Boris Ostrovsky511ba862013-03-23 09:36:36 -04002215 .flush = paravirt_flush_lazy_mmu,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002216 },
2217
2218 .set_fixmap = xen_set_fixmap,
2219};
2220
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002221void __init xen_init_mmu_ops(void)
2222{
Attilio Rao7737b212012-08-21 21:22:38 +01002223 x86_init.paging.pagetable_init = xen_pagetable_init;
Mukesh Rathor76bccef2014-01-03 09:48:08 -05002224
2225 /* Optimization - we can use the HVM one but it has no idea which
2226 * VCPUs are descheduled - which means that it will needlessly IPI
2227	 * them. Xen knows which VCPUs are descheduled, so let it do the job.
2228 */
2229 if (xen_feature(XENFEAT_auto_translated_physmap)) {
2230 pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
2231 return;
2232 }
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002233 pv_mmu_ops = xen_mmu_ops;
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -07002234
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002235 memset(dummy_mapping, 0xff, PAGE_SIZE);
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002236}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002237
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002238/* Protected by xen_reservation_lock. */
2239#define MAX_CONTIG_ORDER 9 /* 2MB */
2240static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2241
2242#define VOID_PTE (mfn_pte(0, __pgprot(0)))
2243static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2244 unsigned long *in_frames,
2245 unsigned long *out_frames)
2246{
2247 int i;
2248 struct multicall_space mcs;
2249
2250 xen_mc_batch();
2251 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2252 mcs = __xen_mc_entry(0);
2253
2254 if (in_frames)
2255 in_frames[i] = virt_to_mfn(vaddr);
2256
2257 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
Konrad Rzeszutek Wilk6eaa4122011-01-18 20:09:41 -05002258 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002259
2260 if (out_frames)
2261 out_frames[i] = virt_to_pfn(vaddr);
2262 }
2263 xen_mc_issue(0);
2264}
2265
2266/*
2267 * Update the pfn-to-mfn mappings for a virtual address range, either to
2268 * point to an array of mfns, or contiguously from a single starting
2269 * mfn.
2270 */
2271static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2272 unsigned long *mfns,
2273 unsigned long first_mfn)
2274{
2275 unsigned i, limit;
2276 unsigned long mfn;
2277
2278 xen_mc_batch();
2279
2280 limit = 1u << order;
2281 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2282 struct multicall_space mcs;
2283 unsigned flags;
2284
2285 mcs = __xen_mc_entry(0);
2286 if (mfns)
2287 mfn = mfns[i];
2288 else
2289 mfn = first_mfn + i;
2290
2291 if (i < (limit - 1))
2292 flags = 0;
2293 else {
2294 if (order == 0)
2295 flags = UVMF_INVLPG | UVMF_ALL;
2296 else
2297 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2298 }
2299
2300 MULTI_update_va_mapping(mcs.mc, vaddr,
2301 mfn_pte(mfn, PAGE_KERNEL), flags);
2302
2303 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2304 }
2305
2306 xen_mc_issue(0);
2307}
2308
2309/*
2310 * Perform the hypercall to exchange a region of our pfns to point to
2311 * memory with the required contiguous alignment. Takes the pfns as
2312 * input, and populates mfns as output.
2313 *
2314 * Returns a success code indicating whether the hypervisor was able to
2315 * satisfy the request or not.
2316 */
2317static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2318 unsigned long *pfns_in,
2319 unsigned long extents_out,
2320 unsigned int order_out,
2321 unsigned long *mfns_out,
2322 unsigned int address_bits)
2323{
2324 long rc;
2325 int success;
2326
2327 struct xen_memory_exchange exchange = {
2328 .in = {
2329 .nr_extents = extents_in,
2330 .extent_order = order_in,
2331 .extent_start = pfns_in,
2332 .domid = DOMID_SELF
2333 },
2334 .out = {
2335 .nr_extents = extents_out,
2336 .extent_order = order_out,
2337 .extent_start = mfns_out,
2338 .address_bits = address_bits,
2339 .domid = DOMID_SELF
2340 }
2341 };
2342
2343 BUG_ON(extents_in << order_in != extents_out << order_out);
2344
2345 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2346 success = (exchange.nr_exchanged == extents_in);
2347
2348 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2349 BUG_ON(success && (rc != 0));
2350
2351 return success;
2352}
2353
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002354int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
Stefano Stabellini69908902013-10-09 16:56:32 +00002355 unsigned int address_bits,
2356 dma_addr_t *dma_handle)
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002357{
2358 unsigned long *in_frames = discontig_frames, out_frame;
2359 unsigned long flags;
2360 int success;
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002361 unsigned long vstart = (unsigned long)phys_to_virt(pstart);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002362
2363 /*
2364 * Currently an auto-translated guest will not perform I/O, nor will
2365 * it require PAE page directories below 4GB. Therefore any calls to
2366 * this function are redundant and can be ignored.
2367 */
2368
2369 if (xen_feature(XENFEAT_auto_translated_physmap))
2370 return 0;
2371
2372 if (unlikely(order > MAX_CONTIG_ORDER))
2373 return -ENOMEM;
2374
2375 memset((void *) vstart, 0, PAGE_SIZE << order);
2376
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002377 spin_lock_irqsave(&xen_reservation_lock, flags);
2378
2379 /* 1. Zap current PTEs, remembering MFNs. */
2380 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2381
2382 /* 2. Get a new contiguous memory extent. */
2383 out_frame = virt_to_pfn(vstart);
2384 success = xen_exchange_memory(1UL << order, 0, in_frames,
2385 1, order, &out_frame,
2386 address_bits);
2387
2388 /* 3. Map the new extent in place of old pages. */
2389 if (success)
2390 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2391 else
2392 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2393
2394 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2395
Stefano Stabellini69908902013-10-09 16:56:32 +00002396 *dma_handle = virt_to_machine(vstart).maddr;
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002397 return success ? 0 : -ENOMEM;
2398}
2399EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2400
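/*
 * Usage sketch for the helper above (hypothetical caller, abbreviated
 * error handling): a PV driver needing a machine-contiguous DMA buffer
 * below 4GB could pair a normal page allocation with
 * xen_create_contiguous_region(), much as the Xen swiotlb code does.
 */
static void * __maybe_unused xen_alloc_machine_contig_sketch(unsigned int order,
							     dma_addr_t *dma)
{
	void *buf = (void *)__get_free_pages(GFP_KERNEL, order);

	if (!buf)
		return NULL;

	if (xen_create_contiguous_region(__pa(buf), order,
					 32 /* address_bits */, dma)) {
		free_pages((unsigned long)buf, order);
		return NULL;
	}

	return buf;
}
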
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002401void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002402{
2403 unsigned long *out_frames = discontig_frames, in_frame;
2404 unsigned long flags;
2405 int success;
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002406 unsigned long vstart;
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002407
2408 if (xen_feature(XENFEAT_auto_translated_physmap))
2409 return;
2410
2411 if (unlikely(order > MAX_CONTIG_ORDER))
2412 return;
2413
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002414 vstart = (unsigned long)phys_to_virt(pstart);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002415 memset((void *) vstart, 0, PAGE_SIZE << order);
2416
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002417 spin_lock_irqsave(&xen_reservation_lock, flags);
2418
2419 /* 1. Find start MFN of contiguous extent. */
2420 in_frame = virt_to_mfn(vstart);
2421
2422 /* 2. Zap current PTEs. */
2423 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2424
2425 /* 3. Do the exchange for non-contiguous MFNs. */
2426 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2427 0, out_frames, 0);
2428
2429 /* 4. Map new pages in place of old pages. */
2430 if (success)
2431 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2432 else
2433 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2434
2435 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2436}
2437EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
2438
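/*
 * Matching teardown for the allocation sketch above (hypothetical
 * caller): hand the machine-contiguous extent back before freeing the
 * pseudo-physical pages.
 */
static void __maybe_unused xen_free_machine_contig_sketch(void *buf,
							   unsigned int order)
{
	xen_destroy_contiguous_region(__pa(buf), order);
	free_pages((unsigned long)buf, order);
}
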
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002439#ifdef CONFIG_XEN_PVHVM
Olaf Hering34b6f012012-10-01 21:18:01 +02002440#ifdef CONFIG_PROC_VMCORE
2441/*
2442 * This function is used in two contexts:
2443 * - the kdump kernel has to check whether a pfn of the crashed kernel
2444 * was a ballooned page. vmcore is using this function to decide
2445 * whether to access a pfn of the crashed kernel.
2446 * - the kexec kernel has to check whether a pfn was ballooned by the
2447 * previous kernel. If the pfn is ballooned, handle it properly.
2448 * Returns 0 if the pfn is not backed by a RAM page, the caller may
2449 * handle the pfn special in this case.
2450 */
2451static int xen_oldmem_pfn_is_ram(unsigned long pfn)
2452{
2453 struct xen_hvm_get_mem_type a = {
2454 .domid = DOMID_SELF,
2455 .pfn = pfn,
2456 };
2457 int ram;
2458
2459 if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
2460 return -ENXIO;
2461
2462 switch (a.mem_type) {
2463 case HVMMEM_mmio_dm:
2464 ram = 0;
2465 break;
2466 case HVMMEM_ram_rw:
2467 case HVMMEM_ram_ro:
2468 default:
2469 ram = 1;
2470 break;
2471 }
2472
2473 return ram;
2474}
2475#endif
2476
Stefano Stabellini59151002010-06-17 14:22:52 +01002477static void xen_hvm_exit_mmap(struct mm_struct *mm)
2478{
2479 struct xen_hvm_pagetable_dying a;
2480 int rc;
2481
2482 a.domid = DOMID_SELF;
2483 a.gpa = __pa(mm->pgd);
2484 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2485 WARN_ON_ONCE(rc < 0);
2486}
2487
2488static int is_pagetable_dying_supported(void)
2489{
2490 struct xen_hvm_pagetable_dying a;
2491 int rc = 0;
2492
2493 a.domid = DOMID_SELF;
2494 a.gpa = 0x00;
2495 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2496 if (rc < 0) {
2497 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2498 return 0;
2499 }
2500 return 1;
2501}
2502
2503void __init xen_hvm_init_mmu_ops(void)
2504{
2505 if (is_pagetable_dying_supported())
2506 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
Olaf Hering34b6f012012-10-01 21:18:01 +02002507#ifdef CONFIG_PROC_VMCORE
2508 register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
2509#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002510}
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002511#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002512
Ian Campbellde1ef202009-05-21 10:09:46 +01002513#define REMAP_BATCH_SIZE 16
2514
2515struct remap_data {
2516 unsigned long mfn;
2517 pgprot_t prot;
2518 struct mmu_update *mmu_update;
2519};
2520
2521static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2522 unsigned long addr, void *data)
2523{
2524 struct remap_data *rmd = data;
2525 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
2526
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -08002527 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
Ian Campbellde1ef202009-05-21 10:09:46 +01002528 rmd->mmu_update->val = pte_val_ma(pte);
2529 rmd->mmu_update++;
2530
2531 return 0;
2532}
2533
2534int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2535 unsigned long addr,
Ian Campbell7892f692012-10-16 17:19:15 +01002536 xen_pfn_t mfn, int nr,
Ian Campbell9a032e32012-10-17 13:37:49 -07002537 pgprot_t prot, unsigned domid,
2538 struct page **pages)
2539
Ian Campbellde1ef202009-05-21 10:09:46 +01002540{
2541 struct remap_data rmd;
2542 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2543 int batch;
2544 unsigned long range;
2545 int err = 0;
2546
Stefano Stabellini1a1d4332012-08-22 17:20:16 +01002547 if (xen_feature(XENFEAT_auto_translated_physmap))
2548 return -EINVAL;
2549
Ian Campbellde1ef202009-05-21 10:09:46 +01002550 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
2551
Konstantin Khlebnikov314e51b2012-10-08 16:29:02 -07002552 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
Ian Campbellde1ef202009-05-21 10:09:46 +01002553
2554 rmd.mfn = mfn;
2555 rmd.prot = prot;
2556
2557 while (nr) {
2558 batch = min(REMAP_BATCH_SIZE, nr);
2559 range = (unsigned long)batch << PAGE_SHIFT;
2560
2561 rmd.mmu_update = mmu_update;
2562 err = apply_to_page_range(vma->vm_mm, addr, range,
2563 remap_area_mfn_pte_fn, &rmd);
2564 if (err)
2565 goto out;
2566
David Vrabel69870a82012-08-30 13:58:11 +01002567 err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
2568 if (err < 0)
Ian Campbellde1ef202009-05-21 10:09:46 +01002569 goto out;
2570
2571 nr -= batch;
2572 addr += range;
2573 }
2574
2575 err = 0;
2576out:
2577
Konrad Rzeszutek Wilk95a7d762012-10-31 12:38:31 -04002578 xen_flush_tlb_all();
Ian Campbellde1ef202009-05-21 10:09:46 +01002579
2580 return err;
2581}
2582EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
Ian Campbell9a032e32012-10-17 13:37:49 -07002583
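/*
 * Usage sketch (hypothetical caller): this is roughly how a privileged
 * tool path such as privcmd drives the helper above - map @nr foreign
 * frames starting at @mfn from domain @domid into a vma that has
 * already been set up with VM_IO | VM_PFNMAP.  All values here are
 * placeholders supplied by the caller.
 */
static int __maybe_unused xen_map_foreign_sketch(struct vm_area_struct *vma,
						 xen_pfn_t mfn, int nr,
						 unsigned int domid)
{
	return xen_remap_domain_mfn_range(vma, vma->vm_start, mfn, nr,
					  vma->vm_page_prot, domid, NULL);
}
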
2584/* Returns: 0 success */
2585int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
2586 int numpgs, struct page **pages)
2587{
2588 if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
2589 return 0;
2590
2591 return -EINVAL;
2592}
2593EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);