Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001/*
2 * Xen mmu operations
3 *
4 * This file contains the various mmu fetch and update operations.
5 * The most important job they must perform is the mapping between the
6 * domain's pfn and the overall machine mfns.
7 *
8 * Xen allows guests to directly update the pagetable, in a controlled
9 * fashion. In other words, the guest modifies the same pagetable
10 * that the CPU actually uses, which eliminates the overhead of having
11 * a separate shadow pagetable.
12 *
13 * In order to allow this, it falls on the guest domain to map its
14 * notion of a "physical" pfn - which is just a domain-local linear
15 * address - into a real "machine address" which the CPU's MMU can
16 * use.
17 *
18 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19 * inserted directly into the pagetable. When creating a new
20 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
21 * when reading the content back with __(pgd|pmd|pte)_val, it converts
22 * the mfn back into a pfn.
23 *
24 * The other constraint is that all pages which make up a pagetable
25 * must be mapped read-only in the guest. This prevents uncontrolled
26 * guest updates to the pagetable. Xen strictly enforces this, and
27 * will disallow any pagetable update which will end up mapping a
28 * pagetable page RW, and will disallow using any writable page as a
29 * pagetable.
30 *
31 * Naively, when loading %cr3 with the base of a new pagetable, Xen
32 * would need to validate the whole pagetable before going on.
33 * Naturally, this is quite slow. The solution is to "pin" a
34 * pagetable, which enforces all the constraints on the pagetable even
35 * when it is not actively in use. This means that Xen can be assured
36 * that it is still valid when you do load it into %cr3, and doesn't
37 * need to revalidate it.
38 *
39 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
40 */
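/*
 * Illustrative sketch of the pfn->mfn step described above.  The call
 * site is hypothetical, but it uses only helpers that appear elsewhere
 * in this file (pfn_to_mfn(), set_pte_mfn()):
 *
 *	unsigned long mfn = pfn_to_mfn(pfn);	   p2m lookup
 *	set_pte_mfn(vaddr, mfn, PAGE_KERNEL);	   hypercall-backed update
 *
 * Native code would simply write pfn_pte(pfn, PAGE_KERNEL) into the
 * pagetable; under Xen the installed pte must carry the mfn instead.
 */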
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -070041#include <linux/sched.h>
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070042#include <linux/highmem.h>
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -070043#include <linux/debugfs.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070044#include <linux/bug.h>
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -070045#include <linux/vmalloc.h>
Randy Dunlap44408ad2009-05-12 13:31:40 -070046#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090047#include <linux/gfp.h>
Yinghai Lua9ce6bc2010-08-25 13:39:17 -070048#include <linux/memblock.h>
Konrad Rzeszutek Wilk2222e712010-12-22 08:57:30 -050049#include <linux/seq_file.h>
Olaf Hering34b6f012012-10-01 21:18:01 +020050#include <linux/crash_dump.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070051
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -080052#include <trace/events/xen.h>
53
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070054#include <asm/pgtable.h>
55#include <asm/tlbflush.h>
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -070056#include <asm/fixmap.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070057#include <asm/mmu_context.h>
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080058#include <asm/setup.h>
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070059#include <asm/paravirt.h>
Alex Nixon7347b402010-02-19 13:31:06 -050060#include <asm/e820.h>
Jeremy Fitzhardingecbcd79c2008-07-08 15:06:27 -070061#include <asm/linkage.h>
Alex Nixon08bbc9d2009-02-09 12:05:46 -080062#include <asm/page.h>
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -070063#include <asm/init.h>
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -070064#include <asm/pat.h>
Andrew Jones900cba82009-12-18 10:31:31 +010065#include <asm/smp.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070066
67#include <asm/xen/hypercall.h>
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070068#include <asm/xen/hypervisor.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070069
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -080070#include <xen/xen.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070071#include <xen/page.h>
72#include <xen/interface/xen.h>
Stefano Stabellini59151002010-06-17 14:22:52 +010073#include <xen/interface/hvm/hvm_op.h>
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080074#include <xen/interface/version.h>
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -080075#include <xen/interface/memory.h>
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080076#include <xen/hvc-console.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070077
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070078#include "multicalls.h"
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070079#include "mmu.h"
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -070080#include "debugfs.h"
81
Alex Nixon19001c82009-02-09 12:05:46 -080082/*
83 * Protects atomic reservation decrease/increase against concurrent increases.
Daniel Kiper06f521d2011-03-08 22:45:46 +010084 * Also protects non-atomic updates of current_pages and balloon lists.
Alex Nixon19001c82009-02-09 12:05:46 -080085 */
86DEFINE_SPINLOCK(xen_reservation_lock);
87
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -040088#ifdef CONFIG_X86_32
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080089/*
90 * Identity map, in addition to plain kernel map. This needs to be
91 * large enough to allocate page table pages to map the rest.
92 * Each page can map 2MB.
93 */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -070094#define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
95static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -040096#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080097#ifdef CONFIG_X86_64
98/* l3 pud for userspace vsyscall mapping */
99static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
100#endif /* CONFIG_X86_64 */
101
102/*
103 * Note about cr3 (pagetable base) values:
104 *
105 * xen_cr3 contains the current logical cr3 value; it contains the
106 * last set cr3. This may not be the current effective cr3, because
107 * its update may be being lazily deferred. However, a vcpu looking
108 * at its own cr3 can use this value knowing that everything will
109 * be self-consistent.
110 *
111 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
112 * hypercall to set the vcpu cr3 is complete (so it may be a little
113 * out of date, but it will never be set early). If one vcpu is
114 * looking at another vcpu's cr3 value, it should use this variable.
115 */
116DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
117DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
118
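/*
 * Illustrative sketch (a hypothetical snippet, mirroring the check made
 * in xen_drop_mm_ref() below): a vcpu inspecting another vcpu's
 * pagetable base must compare against xen_current_cr3, since only that
 * value is guaranteed to reflect a completed set-cr3 hypercall:
 *
 *	if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
 *		smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
 */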
119
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700120/*
121 * Just beyond the highest usermode address. STACK_TOP_MAX has a
122 * redzone above it, so round it up to a PGD boundary.
123 */
124#define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
125
Jeremy Fitzhardinge9976b392009-02-27 09:19:26 -0800126unsigned long arbitrary_virt_to_mfn(void *vaddr)
127{
128 xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
129
130 return PFN_DOWN(maddr.maddr);
131}
132
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700133xmaddr_t arbitrary_virt_to_machine(void *vaddr)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700134{
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700135 unsigned long address = (unsigned long)vaddr;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +0100136 unsigned int level;
Chris Lalancette9f32d212008-10-23 17:40:25 -0700137 pte_t *pte;
138 unsigned offset;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700139
Chris Lalancette9f32d212008-10-23 17:40:25 -0700140 /*
141 * if the vaddr is in the linear mapped range, we can just use
142 * the (quick) virt_to_machine() p2m lookup
143 */
144 if (virt_addr_valid(vaddr))
145 return virt_to_machine(vaddr);
146
147 /* otherwise we have to do a (slower) full page-table walk */
148
149 pte = lookup_address(address, &level);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700150 BUG_ON(pte == NULL);
Chris Lalancette9f32d212008-10-23 17:40:25 -0700151 offset = address & ~PAGE_MASK;
Jeremy Fitzhardingeebd879e2008-07-08 15:06:54 -0700152 return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700153}
Stephen Rothwellde23be52011-01-15 10:36:26 +1100154EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700155
156void make_lowmem_page_readonly(void *vaddr)
157{
158 pte_t *pte, ptev;
159 unsigned long address = (unsigned long)vaddr;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +0100160 unsigned int level;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700161
Ingo Molnarf0646e42008-01-30 13:33:43 +0100162 pte = lookup_address(address, &level);
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -0700163 if (pte == NULL)
164 return; /* vaddr missing */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700165
166 ptev = pte_wrprotect(*pte);
167
168 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
169 BUG();
170}
171
172void make_lowmem_page_readwrite(void *vaddr)
173{
174 pte_t *pte, ptev;
175 unsigned long address = (unsigned long)vaddr;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +0100176 unsigned int level;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700177
Ingo Molnarf0646e42008-01-30 13:33:43 +0100178 pte = lookup_address(address, &level);
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -0700179 if (pte == NULL)
180 return; /* vaddr missing */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700181
182 ptev = pte_mkwrite(*pte);
183
184 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
185 BUG();
186}
187
188
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700189static bool xen_page_pinned(void *ptr)
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100190{
191 struct page *page = virt_to_page(ptr);
192
193 return PagePinned(page);
194}
195
Jeremy Fitzhardingeeba3ff82009-02-09 12:05:49 -0800196void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800197{
198 struct multicall_space mcs;
199 struct mmu_update *u;
200
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800201 trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
202
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800203 mcs = xen_mc_entry(sizeof(*u));
204 u = mcs.args;
205
206 /* ptep might be kmapped when using 32-bit HIGHPTE */
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -0800207 u->ptr = virt_to_machine(ptep).maddr;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800208 u->val = pte_val_ma(pteval);
209
Jeremy Fitzhardingeeba3ff82009-02-09 12:05:49 -0800210 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800211
212 xen_mc_issue(PARAVIRT_LAZY_MMU);
213}
Jeremy Fitzhardingeeba3ff82009-02-09 12:05:49 -0800214EXPORT_SYMBOL_GPL(xen_set_domain_pte);
215
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700216static void xen_extend_mmu_update(const struct mmu_update *update)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700217{
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700218 struct multicall_space mcs;
219 struct mmu_update *u;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700220
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700221 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
222
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700223 if (mcs.mc != NULL) {
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700224 mcs.mc->args[1]++;
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700225 } else {
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700226 mcs = __xen_mc_entry(sizeof(*u));
227 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
228 }
229
230 u = mcs.args;
231 *u = *update;
232}
233
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -0800234static void xen_extend_mmuext_op(const struct mmuext_op *op)
235{
236 struct multicall_space mcs;
237 struct mmuext_op *u;
238
239 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
240
241 if (mcs.mc != NULL) {
242 mcs.mc->args[1]++;
243 } else {
244 mcs = __xen_mc_entry(sizeof(*u));
245 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
246 }
247
248 u = mcs.args;
249 *u = *op;
250}
251
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800252static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700253{
254 struct mmu_update u;
255
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700256 preempt_disable();
257
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700258 xen_mc_batch();
259
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700260 /* ptr may be ioremapped for 64-bit pagetable setup */
261 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700262 u.val = pmd_val_ma(val);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700263 xen_extend_mmu_update(&u);
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700264
265 xen_mc_issue(PARAVIRT_LAZY_MMU);
266
267 preempt_enable();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700268}
269
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800270static void xen_set_pmd(pmd_t *ptr, pmd_t val)
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100271{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800272 trace_xen_mmu_set_pmd(ptr, val);
273
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100274 /* If page is not pinned, we can just update the entry
275 directly */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700276 if (!xen_page_pinned(ptr)) {
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100277 *ptr = val;
278 return;
279 }
280
281 xen_set_pmd_hyper(ptr, val);
282}
283
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700284/*
285 * Associate a virtual page frame with a given physical page frame
286 * and protection flags for that frame.
287 */
288void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
289{
Jeremy Fitzhardinge836fe2f2008-07-08 15:06:58 -0700290 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700291}
292
Jeremy Fitzhardinge4a35c132010-12-01 15:30:41 -0800293static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
294{
295 struct mmu_update u;
296
297 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
298 return false;
299
300 xen_mc_batch();
301
302 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
303 u.val = pte_val_ma(pteval);
304 xen_extend_mmu_update(&u);
305
306 xen_mc_issue(PARAVIRT_LAZY_MMU);
307
308 return true;
309}
310
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800311static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
Jeremy Fitzhardinge4a35c132010-12-01 15:30:41 -0800312{
David Vrabeld095d432012-07-09 11:39:05 +0100313 if (!xen_batched_set_pte(ptep, pteval)) {
314 /*
315 * Could call native_set_pte() here and trap and
316 * emulate the PTE write but with 32-bit guests this
317 * needs two traps (one for each of the two 32-bit
318 * words in the PTE) so do one hypercall directly
319 * instead.
320 */
321 struct mmu_update u;
322
323 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
324 u.val = pte_val_ma(pteval);
325 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
326 }
Jeremy Fitzhardinge4a35c132010-12-01 15:30:41 -0800327}
328
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800329static void xen_set_pte(pte_t *ptep, pte_t pteval)
330{
331 trace_xen_mmu_set_pte(ptep, pteval);
332 __xen_set_pte(ptep, pteval);
333}
334
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800335static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700336 pte_t *ptep, pte_t pteval)
337{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800338 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
339 __xen_set_pte(ptep, pteval);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700340}
341
Tejf63c2f22008-12-16 11:56:06 -0800342pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
343 unsigned long addr, pte_t *ptep)
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700344{
345 /* Just return the pte as-is. We preserve the bits on commit */
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800346 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700347 return *ptep;
348}
349
350void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
351 pte_t *ptep, pte_t pte)
352{
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700353 struct mmu_update u;
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700354
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800355 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700356 xen_mc_batch();
357
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -0800358 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700359 u.val = pte_val_ma(pte);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700360 xen_extend_mmu_update(&u);
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700361
362 xen_mc_issue(PARAVIRT_LAZY_MMU);
363}
364
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700365/* Assume pteval_t is equivalent to all the other *val_t types. */
366static pteval_t pte_mfn_to_pfn(pteval_t val)
367{
David Vrabel5926f872014-03-25 10:38:37 +0000368 if (val & _PAGE_PRESENT) {
Jeremy Fitzhardinge59438c92008-07-21 22:59:42 -0700369 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
Konrad Rzeszutek Wilkb7e5ffe2012-05-03 16:14:14 -0400370 unsigned long pfn = mfn_to_pfn(mfn);
371
Jeremy Fitzhardinge77be1fa2008-07-21 22:59:56 -0700372 pteval_t flags = val & PTE_FLAGS_MASK;
Konrad Rzeszutek Wilkb7e5ffe2012-05-03 16:14:14 -0400373 if (unlikely(pfn == ~0))
374 val = flags & ~_PAGE_PRESENT;
375 else
376 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700377 }
378
379 return val;
380}
381
382static pteval_t pte_pfn_to_mfn(pteval_t val)
383{
David Vrabel5926f872014-03-25 10:38:37 +0000384 if (val & _PAGE_PRESENT) {
Jeremy Fitzhardinge59438c92008-07-21 22:59:42 -0700385 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
Jeremy Fitzhardinge77be1fa2008-07-21 22:59:56 -0700386 pteval_t flags = val & PTE_FLAGS_MASK;
Konrad Rzeszutek Wilkfb389232011-01-05 15:46:31 -0500387 unsigned long mfn;
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700388
Konrad Rzeszutek Wilkfb389232011-01-05 15:46:31 -0500389 if (!xen_feature(XENFEAT_auto_translated_physmap))
390 mfn = get_phys_to_machine(pfn);
391 else
392 mfn = pfn;
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700393 /*
394 * If there's no mfn for the pfn, then just create an
395 * empty non-present pte. Unfortunately this loses
396 * information about the original pfn, so
397 * pte_mfn_to_pfn is asymmetric.
398 */
399 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
400 mfn = 0;
401 flags = 0;
David Vrabel7f2f8822014-01-08 14:01:01 +0000402 } else
403 mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700404 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700405 }
406
407 return val;
408}
409
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700410__visible pteval_t xen_pte_val(pte_t pte)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700411{
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700412 pteval_t pteval = pte.pte;
Konrad Rzeszutek Wilk8eaffa62012-02-10 09:16:27 -0500413#if 0
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700414 /* If this is a WC pte, convert back from Xen WC to Linux WC */
415 if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
416 WARN_ON(!pat_enabled);
417 pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
418 }
Konrad Rzeszutek Wilk8eaffa62012-02-10 09:16:27 -0500419#endif
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700420 return pte_mfn_to_pfn(pteval);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700421}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800422PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700423
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700424__visible pgdval_t xen_pgd_val(pgd_t pgd)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700425{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700426 return pte_mfn_to_pfn(pgd.pgd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700427}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800428PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700429
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700430/*
431 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
432 * are reserved for now, to correspond to the Intel-reserved PAT
433 * types.
434 *
435 * We expect Linux's PAT set as follows:
436 *
437 * Idx PTE flags Linux Xen Default
438 * 0 WB WB WB
439 * 1 PWT WC WT WT
440 * 2 PCD UC- UC- UC-
441 * 3 PCD PWT UC UC UC
442 * 4 PAT WB WC WB
443 * 5 PAT PWT WC WP WT
Konrad Rzeszutek Wilkb1922a52013-09-25 15:27:50 -0400444 * 6 PAT PCD UC- rsv UC-
445 * 7 PAT PCD PWT UC rsv UC
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700446 */
447
448void xen_set_pat(u64 pat)
449{
450 /* We expect Linux to use a PAT setting of
451 * UC UC- WC WB (ignoring the PAT flag) */
452 WARN_ON(pat != 0x0007010600070106ull);
453}
454
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700455__visible pte_t xen_make_pte(pteval_t pte)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700456{
Konrad Rzeszutek Wilk8eaffa62012-02-10 09:16:27 -0500457#if 0
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700458 /* If Linux is trying to set a WC pte, then map to the Xen WC.
459 * If _PAGE_PAT is set, then it probably means it is really
460 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
461 * things work out OK...
462 *
463 * (We should never see kernel mappings with _PAGE_PSE set,
464 * but we could see hugetlbfs mappings, I think.).
465 */
466 if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
467 if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
468 pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
469 }
Konrad Rzeszutek Wilk8eaffa62012-02-10 09:16:27 -0500470#endif
David Vrabel7f2f8822014-01-08 14:01:01 +0000471 pte = pte_pfn_to_mfn(pte);
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800472
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700473 return native_make_pte(pte);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700474}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800475PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700476
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700477__visible pgd_t xen_make_pgd(pgdval_t pgd)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700478{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700479 pgd = pte_pfn_to_mfn(pgd);
480 return native_make_pgd(pgd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700481}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800482PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700483
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700484__visible pmdval_t xen_pmd_val(pmd_t pmd)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700485{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700486 return pte_mfn_to_pfn(pmd.pmd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700487}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800488PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
Jeremy Fitzhardinge28499142008-05-09 12:05:57 +0100489
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800490static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700491{
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700492 struct mmu_update u;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700493
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700494 preempt_disable();
495
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700496 xen_mc_batch();
497
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700498 /* ptr may be ioremapped for 64-bit pagetable setup */
499 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700500 u.val = pud_val_ma(val);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700501 xen_extend_mmu_update(&u);
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700502
503 xen_mc_issue(PARAVIRT_LAZY_MMU);
504
505 preempt_enable();
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700506}
507
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800508static void xen_set_pud(pud_t *ptr, pud_t val)
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100509{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800510 trace_xen_mmu_set_pud(ptr, val);
511
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100512 /* If page is not pinned, we can just update the entry
513 directly */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700514 if (!xen_page_pinned(ptr)) {
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100515 *ptr = val;
516 return;
517 }
518
519 xen_set_pud_hyper(ptr, val);
520}
521
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700522#ifdef CONFIG_X86_PAE
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800523static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700524{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800525 trace_xen_mmu_set_pte_atomic(ptep, pte);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700526 set_64bit((u64 *)ptep, native_pte_val(pte));
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700527}
528
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800529static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700530{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800531 trace_xen_mmu_pte_clear(mm, addr, ptep);
Jeremy Fitzhardinge4a35c132010-12-01 15:30:41 -0800532 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
533 native_pte_clear(mm, addr, ptep);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700534}
535
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800536static void xen_pmd_clear(pmd_t *pmdp)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700537{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800538 trace_xen_mmu_pmd_clear(pmdp);
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100539 set_pmd(pmdp, __pmd(0));
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700540}
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700541#endif /* CONFIG_X86_PAE */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700542
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700543__visible pmd_t xen_make_pmd(pmdval_t pmd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700544{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700545 pmd = pte_pfn_to_mfn(pmd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700546 return native_make_pmd(pmd);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700547}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800548PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700549
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700550#if PAGETABLE_LEVELS == 4
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700551__visible pudval_t xen_pud_val(pud_t pud)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700552{
553 return pte_mfn_to_pfn(pud.pud);
554}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800555PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700556
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700557__visible pud_t xen_make_pud(pudval_t pud)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700558{
559 pud = pte_pfn_to_mfn(pud);
560
561 return native_make_pud(pud);
562}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800563PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700564
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800565static pgd_t *xen_get_user_pgd(pgd_t *pgd)
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700566{
567 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
568 unsigned offset = pgd - pgd_page;
569 pgd_t *user_ptr = NULL;
570
571 if (offset < pgd_index(USER_LIMIT)) {
572 struct page *page = virt_to_page(pgd_page);
573 user_ptr = (pgd_t *)page->private;
574 if (user_ptr)
575 user_ptr += offset;
576 }
577
578 return user_ptr;
579}
580
581static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700582{
583 struct mmu_update u;
584
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700585 u.ptr = virt_to_machine(ptr).maddr;
586 u.val = pgd_val_ma(val);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700587 xen_extend_mmu_update(&u);
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700588}
589
590/*
591 * Raw hypercall-based set_pgd, intended for use in early boot before
592 * there's a page structure. This implies:
593 * 1. The only existing pagetable is the kernel's
594 * 2. It is always pinned
595 * 3. It has no user pagetable attached to it
596 */
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800597static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700598{
599 preempt_disable();
600
601 xen_mc_batch();
602
603 __xen_set_pgd_hyper(ptr, val);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700604
605 xen_mc_issue(PARAVIRT_LAZY_MMU);
606
607 preempt_enable();
608}
609
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800610static void xen_set_pgd(pgd_t *ptr, pgd_t val)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700611{
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700612 pgd_t *user_ptr = xen_get_user_pgd(ptr);
613
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800614 trace_xen_mmu_set_pgd(ptr, user_ptr, val);
615
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700616 /* If page is not pinned, we can just update the entry
617 directly */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700618 if (!xen_page_pinned(ptr)) {
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700619 *ptr = val;
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700620 if (user_ptr) {
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700621 WARN_ON(xen_page_pinned(user_ptr));
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700622 *user_ptr = val;
623 }
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700624 return;
625 }
626
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700627 /* If it's pinned, then we can at least batch the kernel and
628 user updates together. */
629 xen_mc_batch();
630
631 __xen_set_pgd_hyper(ptr, val);
632 if (user_ptr)
633 __xen_set_pgd_hyper(user_ptr, val);
634
635 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700636}
637#endif /* PAGETABLE_LEVELS == 4 */
638
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700639/*
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700640 * (Yet another) pagetable walker. This one is intended for pinning a
641 * pagetable. This means that it walks a pagetable and calls the
642 * callback function on each page it finds making up the page table,
643 * at every level. It walks the entire pagetable, but it only bothers
644 * pinning pte pages which are below limit. In the normal case this
645 * will be STACK_TOP_MAX, but at boot we need to pin up to
646 * FIXADDR_TOP.
647 *
648 * For 32-bit the important bit is that we don't pin beyond there,
649 * because then we start getting into Xen's ptes.
650 *
651 * For 64-bit, we must skip the Xen hole in the middle of the address
652 * space, just after the big x86-64 virtual hole.
653 */
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000654static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
655 int (*func)(struct mm_struct *mm, struct page *,
656 enum pt_level),
657 unsigned long limit)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700658{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700659 int flush = 0;
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700660 unsigned hole_low, hole_high;
661 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
662 unsigned pgdidx, pudidx, pmdidx;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700663
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700664 /* The limit is the last byte to be touched */
665 limit--;
666 BUG_ON(limit >= FIXADDR_TOP);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700667
668 if (xen_feature(XENFEAT_auto_translated_physmap))
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700669 return 0;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700670
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700671 /*
672 * 64-bit has a great big hole in the middle of the address
673 * space, which contains the Xen mappings. On 32-bit these
674 * will end up making a zero-sized hole and so is a no-op.
675 */
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700676 hole_low = pgd_index(USER_LIMIT);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700677 hole_high = pgd_index(PAGE_OFFSET);
678
679 pgdidx_limit = pgd_index(limit);
680#if PTRS_PER_PUD > 1
681 pudidx_limit = pud_index(limit);
682#else
683 pudidx_limit = 0;
684#endif
685#if PTRS_PER_PMD > 1
686 pmdidx_limit = pmd_index(limit);
687#else
688 pmdidx_limit = 0;
689#endif
690
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700691 for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700692 pud_t *pud;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700693
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700694 if (pgdidx >= hole_low && pgdidx < hole_high)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700695 continue;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700696
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700697 if (!pgd_val(pgd[pgdidx]))
698 continue;
699
700 pud = pud_offset(&pgd[pgdidx], 0);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700701
702 if (PTRS_PER_PUD > 1) /* not folded */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700703 flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700704
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700705 for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700706 pmd_t *pmd;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700707
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700708 if (pgdidx == pgdidx_limit &&
709 pudidx > pudidx_limit)
710 goto out;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700711
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700712 if (pud_none(pud[pudidx]))
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700713 continue;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700714
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700715 pmd = pmd_offset(&pud[pudidx], 0);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700716
717 if (PTRS_PER_PMD > 1) /* not folded */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700718 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700719
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700720 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
721 struct page *pte;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700722
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700723 if (pgdidx == pgdidx_limit &&
724 pudidx == pudidx_limit &&
725 pmdidx > pmdidx_limit)
726 goto out;
727
728 if (pmd_none(pmd[pmdidx]))
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700729 continue;
730
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700731 pte = pmd_page(pmd[pmdidx]);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700732 flush |= (*func)(mm, pte, PT_PTE);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700733 }
734 }
735 }
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700736
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700737out:
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700738 /* Do the top level last, so that the callbacks can use it as
739 a cue to do final things like tlb flushes. */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700740 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700741
742 return flush;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700743}
744
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000745static int xen_pgd_walk(struct mm_struct *mm,
746 int (*func)(struct mm_struct *mm, struct page *,
747 enum pt_level),
748 unsigned long limit)
749{
750 return __xen_pgd_walk(mm, mm->pgd, func, limit);
751}
752
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700753/* If we're using split pte locks, then take the page's lock and
754 return a pointer to it. Otherwise return NULL. */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700755static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700756{
757 spinlock_t *ptl = NULL;
758
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -0800759#if USE_SPLIT_PTE_PTLOCKS
Kirill A. Shutemov49076ec2013-11-14 14:31:51 -0800760 ptl = ptlock_ptr(page);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700761 spin_lock_nest_lock(ptl, &mm->page_table_lock);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700762#endif
763
764 return ptl;
765}
766
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700767static void xen_pte_unlock(void *v)
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700768{
769 spinlock_t *ptl = v;
770 spin_unlock(ptl);
771}
772
773static void xen_do_pin(unsigned level, unsigned long pfn)
774{
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -0800775 struct mmuext_op op;
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700776
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -0800777 op.cmd = level;
778 op.arg1.mfn = pfn_to_mfn(pfn);
779
780 xen_extend_mmuext_op(&op);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700781}
782
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700783static int xen_pin_page(struct mm_struct *mm, struct page *page,
784 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700785{
Christoph Lameterd60cd462008-04-28 02:12:51 -0700786 unsigned pgfl = TestSetPagePinned(page);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700787 int flush;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700788
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700789 if (pgfl)
790 flush = 0; /* already pinned */
791 else if (PageHighMem(page))
792 /* kmaps need flushing if we found an unpinned
793 highpage */
794 flush = 1;
795 else {
796 void *pt = lowmem_page_address(page);
797 unsigned long pfn = page_to_pfn(page);
798 struct multicall_space mcs = __xen_mc_entry(0);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700799 spinlock_t *ptl;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700800
801 flush = 0;
802
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700803 /*
804 * We need to hold the pagetable lock between the time
805 * we make the pagetable RO and when we actually pin
806 * it. If we don't, then other users may come in and
807 * attempt to update the pagetable by writing it,
808 * which will fail because the memory is RO but not
809 * pinned, so Xen won't do the trap'n'emulate.
810 *
811 * If we're using split pte locks, we can't hold the
812 * entire pagetable's worth of locks during the
813 * traverse, because we may wrap the preempt count (8
814 * bits). The solution is to mark RO and pin each PTE
815 * page while holding the lock. This means the number
816 * of locks we end up holding is never more than a
817 * batch size (~32 entries, at present).
818 *
819 * If we're not using split pte locks, we needn't pin
820 * the PTE pages independently, because we're
821 * protected by the overall pagetable lock.
822 */
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700823 ptl = NULL;
824 if (level == PT_PTE)
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700825 ptl = xen_pte_lock(page, mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700826
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700827 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
828 pfn_pte(pfn, PAGE_KERNEL_RO),
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700829 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
830
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700831 if (ptl) {
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700832 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
833
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700834 /* Queue a deferred unlock for when this batch
835 is completed. */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700836 xen_mc_callback(xen_pte_unlock, ptl);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700837 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700838 }
839
840 return flush;
841}
842
843/* This is called just after a mm has been created, but it has not
844 been used yet. We need to make sure that its pagetable is all
845 read-only, and can be pinned. */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700846static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700847{
Jeremy Fitzhardinge5f94fb52010-12-17 15:31:23 -0800848 trace_xen_mmu_pgd_pin(mm, pgd);
849
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700850 xen_mc_batch();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700851
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000852 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
Jeremy Fitzhardinged05fdf32008-10-28 19:23:06 +1100853 /* re-enable interrupts for flushing */
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700854 xen_mc_issue(0);
Jeremy Fitzhardinged05fdf32008-10-28 19:23:06 +1100855
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700856 kmap_flush_unused();
Jeremy Fitzhardinged05fdf32008-10-28 19:23:06 +1100857
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700858 xen_mc_batch();
859 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700860
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700861#ifdef CONFIG_X86_64
862 {
863 pgd_t *user_pgd = xen_get_user_pgd(pgd);
864
865 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
866
867 if (user_pgd) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700868 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
Tejf63c2f22008-12-16 11:56:06 -0800869 xen_do_pin(MMUEXT_PIN_L4_TABLE,
870 PFN_DOWN(__pa(user_pgd)));
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700871 }
872 }
873#else /* CONFIG_X86_32 */
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700874#ifdef CONFIG_X86_PAE
875 /* Need to make sure unshared kernel PMD is pinnable */
Jeremy Fitzhardinge47cb2ed2008-11-06 13:48:24 -0800876 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700877 PT_PMD);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700878#endif
Jeremy Fitzhardinge28499142008-05-09 12:05:57 +0100879 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700880#endif /* CONFIG_X86_64 */
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700881 xen_mc_issue(0);
882}
883
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700884static void xen_pgd_pin(struct mm_struct *mm)
885{
886 __xen_pgd_pin(mm, mm->pgd);
887}
888
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100889/*
890 * On save, we need to pin all pagetables to make sure they get their
891 * mfns turned into pfns. Search the list for any unpinned pgds and pin
892 * them (unpinned pgds are not currently in use, probably because the
893 * process is under construction or destruction).
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700894 *
895 * Expected to be called in stop_machine() ("equivalent to taking
896 * every spinlock in the system"), so the locking doesn't really
897 * matter all that much.
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100898 */
899void xen_mm_pin_all(void)
900{
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100901 struct page *page;
902
Andrea Arcangelia79e53d2011-02-16 15:45:22 -0800903 spin_lock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100904
905 list_for_each_entry(page, &pgd_list, lru) {
906 if (!PagePinned(page)) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700907 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100908 SetPageSavePinned(page);
909 }
910 }
911
Andrea Arcangelia79e53d2011-02-16 15:45:22 -0800912 spin_unlock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100913}
914
Eduardo Habkostc1f2f092008-07-08 15:06:24 -0700915/*
916 * The init_mm pagetable is really pinned as soon as it's created, but
917 * that's before we have page structures to store the bits. So do all
918 * the book-keeping now.
919 */
Daniel Kiper3f5089532011-05-12 17:19:53 -0400920static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700921 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700922{
923 SetPagePinned(page);
924 return 0;
925}
926
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -0700927static void __init xen_mark_init_mm_pinned(void)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700928{
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700929 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700930}
931
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700932static int xen_unpin_page(struct mm_struct *mm, struct page *page,
933 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700934{
Christoph Lameterd60cd462008-04-28 02:12:51 -0700935 unsigned pgfl = TestClearPagePinned(page);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700936
937 if (pgfl && !PageHighMem(page)) {
938 void *pt = lowmem_page_address(page);
939 unsigned long pfn = page_to_pfn(page);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700940 spinlock_t *ptl = NULL;
941 struct multicall_space mcs;
942
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700943 /*
944 * Do the converse to pin_page. If we're using split
945 * pte locks, we must be holding the lock while
946 * the pte page is unpinned but still RO to prevent
947 * concurrent updates from seeing it in this
948 * partially-pinned state.
949 */
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700950 if (level == PT_PTE) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700951 ptl = xen_pte_lock(page, mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700952
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700953 if (ptl)
954 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700955 }
956
957 mcs = __xen_mc_entry(0);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700958
959 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
960 pfn_pte(pfn, PAGE_KERNEL),
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700961 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
962
963 if (ptl) {
964 /* unlock when batch completed */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700965 xen_mc_callback(xen_pte_unlock, ptl);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700966 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700967 }
968
969 return 0; /* never need to flush on unpin */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700970}
971
972/* Release a pagetable's pages back as normal RW */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700973static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700974{
Jeremy Fitzhardinge5f94fb52010-12-17 15:31:23 -0800975 trace_xen_mmu_pgd_unpin(mm, pgd);
976
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700977 xen_mc_batch();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700978
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700979 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700980
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700981#ifdef CONFIG_X86_64
982 {
983 pgd_t *user_pgd = xen_get_user_pgd(pgd);
984
985 if (user_pgd) {
Tejf63c2f22008-12-16 11:56:06 -0800986 xen_do_pin(MMUEXT_UNPIN_TABLE,
987 PFN_DOWN(__pa(user_pgd)));
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700988 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700989 }
990 }
991#endif
992
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700993#ifdef CONFIG_X86_PAE
994 /* Need to make sure unshared kernel PMD is unpinned */
Jeremy Fitzhardinge47cb2ed2008-11-06 13:48:24 -0800995 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700996 PT_PMD);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700997#endif
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700998
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000999 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001000
1001 xen_mc_issue(0);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001002}
1003
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001004static void xen_pgd_unpin(struct mm_struct *mm)
1005{
1006 __xen_pgd_unpin(mm, mm->pgd);
1007}
1008
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001009/*
1010 * On resume, undo any pinning done at save, so that the rest of the
1011 * kernel doesn't see any unexpected pinned pagetables.
1012 */
1013void xen_mm_unpin_all(void)
1014{
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001015 struct page *page;
1016
Andrea Arcangelia79e53d2011-02-16 15:45:22 -08001017 spin_lock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001018
1019 list_for_each_entry(page, &pgd_list, lru) {
1020 if (PageSavePinned(page)) {
1021 BUG_ON(!PagePinned(page));
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001022 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001023 ClearPageSavePinned(page);
1024 }
1025 }
1026
Andrea Arcangelia79e53d2011-02-16 15:45:22 -08001027 spin_unlock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001028}
1029
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -08001030static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001031{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001032 spin_lock(&next->page_table_lock);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001033 xen_pgd_pin(next);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001034 spin_unlock(&next->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001035}
1036
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -08001037static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001038{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001039 spin_lock(&mm->page_table_lock);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001040 xen_pgd_pin(mm);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001041 spin_unlock(&mm->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001042}
1043
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001044
1045#ifdef CONFIG_SMP
1046/* Another cpu may still have its %cr3 pointing at the pagetable, so
1047 we need to repoint it somewhere else before we can unpin it. */
1048static void drop_other_mm_ref(void *info)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001049{
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001050 struct mm_struct *mm = info;
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001051 struct mm_struct *active_mm;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001052
Alex Shi2113f462012-01-13 23:53:35 +08001053 active_mm = this_cpu_read(cpu_tlbstate.active_mm);
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001054
Alex Shi2113f462012-01-13 23:53:35 +08001055 if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001056 leave_mm(smp_processor_id());
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001057
1058 /* If this cpu still has a stale cr3 reference, then make sure
1059 it has been flushed. */
Alex Shi2113f462012-01-13 23:53:35 +08001060 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001061 load_cr3(swapper_pg_dir);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001062}
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001063
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001064static void xen_drop_mm_ref(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001065{
Mike Travise4d98202008-12-16 17:34:05 -08001066 cpumask_var_t mask;
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001067 unsigned cpu;
1068
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001069 if (current->active_mm == mm) {
1070 if (current->mm == mm)
1071 load_cr3(swapper_pg_dir);
1072 else
1073 leave_mm(smp_processor_id());
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001074 }
1075
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001076 /* Get the "official" set of cpus referring to our pagetable. */
Mike Travise4d98202008-12-16 17:34:05 -08001077 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1078 for_each_online_cpu(cpu) {
Rusty Russell78f1c4d2009-09-24 09:34:51 -06001079 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
Mike Travise4d98202008-12-16 17:34:05 -08001080 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1081 continue;
1082 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1083 }
1084 return;
1085 }
Rusty Russell78f1c4d2009-09-24 09:34:51 -06001086 cpumask_copy(mask, mm_cpumask(mm));
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001087
1088 /* It's possible that a vcpu may have a stale reference to our
1089 cr3, because it's in lazy mode, and it hasn't yet flushed
1090 its set of pending hypercalls. In this case, we can
1091 look at its actual current cr3 value, and force it to flush
1092 if needed. */
1093 for_each_online_cpu(cpu) {
1094 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
Mike Travise4d98202008-12-16 17:34:05 -08001095 cpumask_set_cpu(cpu, mask);
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001096 }
1097
Mike Travise4d98202008-12-16 17:34:05 -08001098 if (!cpumask_empty(mask))
1099 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1100 free_cpumask_var(mask);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001101}
1102#else
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001103static void xen_drop_mm_ref(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001104{
1105 if (current->active_mm == mm)
1106 load_cr3(swapper_pg_dir);
1107}
1108#endif
1109
1110/*
1111 * While a process runs, Xen pins its pagetables, which means that the
1112 * hypervisor forces it to be read-only, and it controls all updates
1113 * to it. This means that all pagetable updates have to go via the
1114 * hypervisor, which is moderately expensive.
1115 *
1116 * Since we're pulling the pagetable down, we switch to use init_mm,
1117 * unpin old process pagetable and mark it all read-write, which
1118 * allows further operations on it to be simple memory accesses.
1119 *
1120 * The only subtle point is that another CPU may be still using the
1121 * pagetable because of lazy tlb flushing. This means we need to
1122 * switch all CPUs off this pagetable before we can unpin it.
1123 */
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -08001124static void xen_exit_mmap(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001125{
1126 get_cpu(); /* make sure we don't move around */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001127 xen_drop_mm_ref(mm);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001128 put_cpu();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001129
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -07001130 spin_lock(&mm->page_table_lock);
Jeremy Fitzhardingedf912ea2007-09-25 11:50:00 -07001131
1132 /* pgd may not be pinned in the error exit path of execve */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001133 if (xen_page_pinned(mm->pgd))
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001134 xen_pgd_unpin(mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001135
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -07001136 spin_unlock(&mm->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001137}
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -07001138
Attilio Raoc7112882012-08-21 21:22:40 +01001139static void xen_post_allocator_init(void);
1140
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001141#ifdef CONFIG_X86_64
1142static void __init xen_cleanhighmap(unsigned long vaddr,
1143 unsigned long vaddr_end)
1144{
1145 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1146 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1147
1148 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1149 * We include the PMD passed in on _both_ boundaries. */
1150 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
1151 pmd++, vaddr += PMD_SIZE) {
1152 if (pmd_none(*pmd))
1153 continue;
1154 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1155 set_pmd(pmd, __pmd(0));
1156 }
1157 /* In case we did something silly, we should crash in this function
 1158	 * instead of somewhere later, where it would be confusing. */
1159 xen_mc_flush();
1160}
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001161static void __init xen_pagetable_p2m_copy(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001162{
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001163 unsigned long size;
1164 unsigned long addr;
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001165 unsigned long new_mfn_list;
1166
1167 if (xen_feature(XENFEAT_auto_translated_physmap))
1168 return;
1169
1170 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1171
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001172 new_mfn_list = xen_revector_p2m_tree();
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001173 /* No memory or already called. */
1174 if (!new_mfn_list || new_mfn_list == xen_start_info->mfn_list)
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001175 return;
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001176
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001177	/* Using the __ka address, fill the old list with INVALID_P2M_ENTRY. */
1178 memset((void *)xen_start_info->mfn_list, 0xff, size);
1179
1180 /* We should be in __ka space. */
1181 BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
1182 addr = xen_start_info->mfn_list;
 1183	/* We round up to the PMD, which means that if anybody at this stage is
 1184	 * using the __ka address of xen_start_info or xen_start_info->shared_info
 1185	 * they are going to crash. Fortunately we have already revectored
1186 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
1187 size = roundup(size, PMD_SIZE);
1188 xen_cleanhighmap(addr, addr + size);
1189
1190 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1191 memblock_free(__pa(xen_start_info->mfn_list), size);
1192 /* And revector! Bye bye old array */
1193 xen_start_info->mfn_list = new_mfn_list;
1194
Konrad Rzeszutek Wilk3aca7fb2012-08-14 14:34:00 -04001195 /* At this stage, cleanup_highmap has already cleaned __ka space
1196 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1197 * the ramdisk). We continue on, erasing PMD entries that point to page
1198 * tables - do note that they are accessible at this stage via __va.
1199 * For good measure we also round up to the PMD - which means that if
 1200	 * anybody is using the __ka address of the initial boot-stack and
 1201	 * tries to use it, they are going to crash. The xen_start_info has been
1202 * taken care of already in xen_setup_kernel_pagetable. */
1203 addr = xen_start_info->pt_base;
1204 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1205
1206 xen_cleanhighmap(addr, addr + size);
1207 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1208#ifdef DEBUG
 1209	/* This is superfluous and not strictly necessary, but let's do it
 1210	 * anyway. The MODULES_VADDR -> MODULES_END range should be clear of
1211 * anything at this stage. */
1212 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1213#endif
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001214}
1215#endif
1216
1217static void __init xen_pagetable_init(void)
1218{
1219 paging_init();
1220 xen_setup_shared_info();
1221#ifdef CONFIG_X86_64
1222 xen_pagetable_p2m_copy();
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001223#endif
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001224 xen_post_allocator_init();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001225}
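/*
 * Under Xen PV the faulting address is delivered through the shared
 * vcpu_info structure rather than the real %cr2, so these accessors
 * operate on this CPU's copy of it.
 */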
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001226static void xen_write_cr2(unsigned long cr2)
1227{
Alex Shi2113f462012-01-13 23:53:35 +08001228 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001229}
1230
1231static unsigned long xen_read_cr2(void)
1232{
Alex Shi2113f462012-01-13 23:53:35 +08001233 return this_cpu_read(xen_vcpu)->arch.cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001234}
1235
1236unsigned long xen_read_cr2_direct(void)
1237{
Alex Shi2113f462012-01-13 23:53:35 +08001238 return this_cpu_read(xen_vcpu_info.arch.cr2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001239}
1240
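/* Flush the TLB on every vcpu in the domain with one batched MMUEXT_TLB_FLUSH_ALL op. */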
Konrad Rzeszutek Wilk95a7d762012-10-31 12:38:31 -04001241void xen_flush_tlb_all(void)
1242{
1243 struct mmuext_op *op;
1244 struct multicall_space mcs;
1245
1246 trace_xen_mmu_flush_tlb_all(0);
1247
1248 preempt_disable();
1249
1250 mcs = xen_mc_entry(sizeof(*op));
1251
1252 op = mcs.args;
1253 op->cmd = MMUEXT_TLB_FLUSH_ALL;
1254 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1255
1256 xen_mc_issue(PARAVIRT_LAZY_MMU);
1257
1258 preempt_enable();
1259}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001260static void xen_flush_tlb(void)
1261{
1262 struct mmuext_op *op;
1263 struct multicall_space mcs;
1264
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001265 trace_xen_mmu_flush_tlb(0);
1266
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001267 preempt_disable();
1268
1269 mcs = xen_mc_entry(sizeof(*op));
1270
1271 op = mcs.args;
1272 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1273 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1274
1275 xen_mc_issue(PARAVIRT_LAZY_MMU);
1276
1277 preempt_enable();
1278}
1279
1280static void xen_flush_tlb_single(unsigned long addr)
1281{
1282 struct mmuext_op *op;
1283 struct multicall_space mcs;
1284
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001285 trace_xen_mmu_flush_tlb_single(addr);
1286
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001287 preempt_disable();
1288
1289 mcs = xen_mc_entry(sizeof(*op));
1290 op = mcs.args;
1291 op->cmd = MMUEXT_INVLPG_LOCAL;
1292 op->arg1.linear_addr = addr & PAGE_MASK;
1293 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1294
1295 xen_mc_issue(PARAVIRT_LAZY_MMU);
1296
1297 preempt_enable();
1298}
1299
1300static void xen_flush_tlb_others(const struct cpumask *cpus,
Alex Shie7b52ff2012-06-28 09:02:17 +08001301 struct mm_struct *mm, unsigned long start,
1302 unsigned long end)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001303{
1304 struct {
1305 struct mmuext_op op;
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001306#ifdef CONFIG_SMP
Andrew Jones900cba82009-12-18 10:31:31 +01001307 DECLARE_BITMAP(mask, num_processors);
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001308#else
1309 DECLARE_BITMAP(mask, NR_CPUS);
1310#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001311 } *args;
1312 struct multicall_space mcs;
1313
Alex Shie7b52ff2012-06-28 09:02:17 +08001314 trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001315
Jeremy Fitzhardingee3f8a742009-03-04 17:36:57 -08001316 if (cpumask_empty(cpus))
1317 return; /* nothing to do */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001318
1319 mcs = xen_mc_entry(sizeof(*args));
1320 args = mcs.args;
1321 args->op.arg2.vcpumask = to_cpumask(args->mask);
1322
1323 /* Remove us, and any offline CPUS. */
1324 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1325 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001326
Alex Shie7b52ff2012-06-28 09:02:17 +08001327 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
Alex Shice7184b2012-08-24 08:55:13 +00001328 if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001329 args->op.cmd = MMUEXT_INVLPG_MULTI;
Alex Shie7b52ff2012-06-28 09:02:17 +08001330 args->op.arg1.linear_addr = start;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001331 }
1332
1333 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1334
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001335 xen_mc_issue(PARAVIRT_LAZY_MMU);
1336}
1337
1338static unsigned long xen_read_cr3(void)
1339{
Alex Shi2113f462012-01-13 23:53:35 +08001340 return this_cpu_read(xen_cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001341}
1342
1343static void set_current_cr3(void *v)
1344{
Alex Shi2113f462012-01-13 23:53:35 +08001345 this_cpu_write(xen_current_cr3, (unsigned long)v);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001346}
1347
1348static void __xen_write_cr3(bool kernel, unsigned long cr3)
1349{
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001350 struct mmuext_op op;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001351 unsigned long mfn;
1352
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001353 trace_xen_mmu_write_cr3(kernel, cr3);
1354
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001355 if (cr3)
1356 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1357 else
1358 mfn = 0;
1359
1360 WARN_ON(mfn == 0 && kernel);
1361
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001362 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1363 op.arg1.mfn = mfn;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001364
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001365 xen_extend_mmuext_op(&op);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001366
1367 if (kernel) {
Alex Shi2113f462012-01-13 23:53:35 +08001368 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001369
1370 /* Update xen_current_cr3 once the batch has actually
1371 been submitted. */
1372 xen_mc_callback(set_current_cr3, (void *)cr3);
1373 }
1374}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001375static void xen_write_cr3(unsigned long cr3)
1376{
1377 BUG_ON(preemptible());
1378
1379 xen_mc_batch(); /* disables interrupts */
1380
 1381	/* Update while interrupts are disabled, so it's atomic with
 1382	   respect to IPIs */
Alex Shi2113f462012-01-13 23:53:35 +08001383 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001384
1385 __xen_write_cr3(true, cr3);
1386
1387#ifdef CONFIG_X86_64
1388 {
1389 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1390 if (user_pgd)
1391 __xen_write_cr3(false, __pa(user_pgd));
1392 else
1393 __xen_write_cr3(false, 0);
1394 }
1395#endif
1396
1397 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1398}
1399
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001400#ifdef CONFIG_X86_64
1401/*
1402 * At the start of the day - when Xen launches a guest, it has already
1403 * built pagetables for the guest. We diligently look over them
 1404 * in xen_setup_kernel_pagetable and graft them as appropriate into the
1405 * init_level4_pgt and its friends. Then when we are happy we load
1406 * the new init_level4_pgt - and continue on.
1407 *
1408 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1409 * up the rest of the pagetables. When it has completed it loads the cr3.
1410 * N.B. that baremetal would start at 'start_kernel' (and the early
1411 * #PF handler would create bootstrap pagetables) - so we are running
1412 * with the same assumptions as what to do when write_cr3 is executed
 1413 * under the same assumptions about what write_cr3 must do when it is
 1414 * executed at this point.
1415 * Since there are no user-page tables at all, we have two variants
1416 * of xen_write_cr3 - the early bootup (this one), and the late one
1417 * (xen_write_cr3). The reason we have to do that is that in 64-bit
1418 * the Linux kernel and user-space are both in ring 3 while the
1419 * hypervisor is in ring 0.
1420 */
1421static void __init xen_write_cr3_init(unsigned long cr3)
1422{
1423 BUG_ON(preemptible());
1424
1425 xen_mc_batch(); /* disables interrupts */
1426
 1427	/* Update while interrupts are disabled, so it's atomic with
 1428	   respect to IPIs */
1429 this_cpu_write(xen_cr3, cr3);
1430
1431 __xen_write_cr3(true, cr3);
1432
1433 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001434}
1435#endif
1436
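/*
 * On 64-bit a separate user pagetable root is allocated and kept in
 * page->private of the kernel pgd's struct page; only the vsyscall
 * slot is populated here.
 */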
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001437static int xen_pgd_alloc(struct mm_struct *mm)
1438{
1439 pgd_t *pgd = mm->pgd;
1440 int ret = 0;
1441
1442 BUG_ON(PagePinned(virt_to_page(pgd)));
1443
1444#ifdef CONFIG_X86_64
1445 {
1446 struct page *page = virt_to_page(pgd);
1447 pgd_t *user_pgd;
1448
1449 BUG_ON(page->private != 0);
1450
1451 ret = -ENOMEM;
1452
1453 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1454 page->private = (unsigned long)user_pgd;
1455
1456 if (user_pgd != NULL) {
Andy Lutomirskif40c3302014-05-05 12:19:36 -07001457 user_pgd[pgd_index(VSYSCALL_ADDR)] =
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001458 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1459 ret = 0;
1460 }
1461
1462 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1463 }
1464#endif
1465
1466 return ret;
1467}
1468
1469static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1470{
1471#ifdef CONFIG_X86_64
1472 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1473
1474 if (user_pgd)
1475 free_page((unsigned long)user_pgd);
1476#endif
1477}
1478
Stefano Stabelliniee176452011-04-19 14:47:31 +01001479#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001480static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001481{
1482 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1483 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1484 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1485 pte_val_ma(pte));
Stefano Stabelliniee176452011-04-19 14:47:31 +01001486
1487 return pte;
1488}
1489#else /* CONFIG_X86_64 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001490static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Stefano Stabelliniee176452011-04-19 14:47:31 +01001491{
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001492 return pte;
1493}
Stefano Stabelliniee176452011-04-19 14:47:31 +01001494#endif /* CONFIG_X86_64 */
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001495
David Vrabeld095d432012-07-09 11:39:05 +01001496/*
1497 * Init-time set_pte while constructing initial pagetables, which
1498 * doesn't allow RO page table pages to be remapped RW.
1499 *
David Vrabel66a27dd2012-07-09 11:39:06 +01001500 * If there is no MFN for this PFN then this page is initially
1501 * ballooned out so clear the PTE (as in decrease_reservation() in
1502 * drivers/xen/balloon.c).
1503 *
David Vrabeld095d432012-07-09 11:39:05 +01001504 * Many of these PTE updates are done on unpinned and writable pages
1505 * and doing a hypercall for these is unnecessary and expensive. At
1506 * this point it is not possible to tell if a page is pinned or not,
1507 * so always write the PTE directly and rely on Xen trapping and
1508 * emulating any updates as necessary.
1509 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001510static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001511{
David Vrabel66a27dd2012-07-09 11:39:06 +01001512 if (pte_mfn(pte) != INVALID_P2M_ENTRY)
1513 pte = mask_rw_pte(ptep, pte);
1514 else
1515 pte = __pte_ma(0);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001516
David Vrabeld095d432012-07-09 11:39:05 +01001517 native_set_pte(ptep, pte);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001518}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001519
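/* Issue a single pin/unpin mmuext op synchronously; failure is fatal. */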
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001520static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1521{
1522 struct mmuext_op op;
1523 op.cmd = cmd;
1524 op.arg1.mfn = pfn_to_mfn(pfn);
1525 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1526 BUG();
1527}
1528
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001529/* Early in boot, while setting up the initial pagetable, assume
1530 everything is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001531static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001532{
1533#ifdef CONFIG_FLATMEM
1534 BUG_ON(mem_map); /* should only be used early */
1535#endif
1536 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001537 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1538}
1539
1540/* Used for pmd and pud */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001541static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001542{
1543#ifdef CONFIG_FLATMEM
1544 BUG_ON(mem_map); /* should only be used early */
1545#endif
1546 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001547}
1548
1549/* Early release_pte assumes that all pts are pinned, since there's
1550 only init_mm and anything attached to that is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001551static void __init xen_release_pte_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001552{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001553 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001554 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1555}
1556
Daniel Kiper3f5089532011-05-12 17:19:53 -04001557static void __init xen_release_pmd_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001558{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001559 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001560}
1561
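/*
 * Batched helpers: queue a pin/unpin or a PTE-protection change in the
 * current multicall rather than issuing an immediate hypercall.
 */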
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001562static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1563{
1564 struct multicall_space mcs;
1565 struct mmuext_op *op;
1566
1567 mcs = __xen_mc_entry(sizeof(*op));
1568 op = mcs.args;
1569 op->cmd = cmd;
1570 op->arg1.mfn = pfn_to_mfn(pfn);
1571
1572 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1573}
1574
1575static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1576{
1577 struct multicall_space mcs;
1578 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1579
1580 mcs = __xen_mc_entry(0);
1581 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1582 pfn_pte(pfn, prot), 0);
1583}
1584
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001585/* This needs to make sure the new pte page is pinned iff it's being
1586 attached to a pinned pagetable. */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001587static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1588 unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001589{
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001590 bool pinned = PagePinned(virt_to_page(mm->pgd));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001591
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001592 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001593
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001594 if (pinned) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001595 struct page *page = pfn_to_page(pfn);
1596
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001597 SetPagePinned(page);
1598
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001599 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001600 xen_mc_batch();
1601
1602 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1603
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -08001604 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001605 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1606
1607 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001608 } else {
1609 /* make sure there are no stray mappings of
1610 this page */
1611 kmap_flush_unused();
1612 }
1613 }
1614}
1615
1616static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1617{
1618 xen_alloc_ptpage(mm, pfn, PT_PTE);
1619}
1620
1621static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1622{
1623 xen_alloc_ptpage(mm, pfn, PT_PMD);
1624}
1625
1626/* This should never happen until we're OK to use struct page */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001627static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001628{
1629 struct page *page = pfn_to_page(pfn);
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001630 bool pinned = PagePinned(page);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001631
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001632 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1633
1634 if (pinned) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001635 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001636 xen_mc_batch();
1637
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -08001638 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001639 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1640
1641 __set_pfn_prot(pfn, PAGE_KERNEL);
1642
1643 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001644 }
1645 ClearPagePinned(page);
1646 }
1647}
1648
1649static void xen_release_pte(unsigned long pfn)
1650{
1651 xen_release_ptpage(pfn, PT_PTE);
1652}
1653
1654static void xen_release_pmd(unsigned long pfn)
1655{
1656 xen_release_ptpage(pfn, PT_PMD);
1657}
1658
1659#if PAGETABLE_LEVELS == 4
1660static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1661{
1662 xen_alloc_ptpage(mm, pfn, PT_PUD);
1663}
1664
1665static void xen_release_pud(unsigned long pfn)
1666{
1667 xen_release_ptpage(pfn, PT_PUD);
1668}
1669#endif
1670
1671void __init xen_reserve_top(void)
1672{
1673#ifdef CONFIG_X86_32
1674 unsigned long top = HYPERVISOR_VIRT_START;
1675 struct xen_platform_parameters pp;
1676
1677 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1678 top = pp.virt_start;
1679
1680 reserve_top_address(-top);
1681#endif /* CONFIG_X86_32 */
1682}
1683
1684/*
1685 * Like __va(), but returns address in the kernel mapping (which is
 1686 * all we have until the physical memory mapping has been set up).
1687 */
1688static void *__ka(phys_addr_t paddr)
1689{
1690#ifdef CONFIG_X86_64
1691 return (void *)(paddr + __START_KERNEL_map);
1692#else
1693 return __va(paddr);
1694#endif
1695}
1696
1697/* Convert a machine address to physical address */
1698static unsigned long m2p(phys_addr_t maddr)
1699{
1700 phys_addr_t paddr;
1701
1702 maddr &= PTE_PFN_MASK;
1703 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1704
1705 return paddr;
1706}
1707
1708/* Convert a machine address to kernel virtual */
1709static void *m2v(phys_addr_t maddr)
1710{
1711 return __ka(m2p(maddr));
1712}
1713
Juan Quintela4ec53872010-09-02 15:45:43 +01001714/* Set the page permissions on identity-mapped pages */
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001715static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001716{
1717 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1718 pte_t pte = pfn_pte(pfn, prot);
1719
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001720 /* For PVH no need to set R/O or R/W to pin them or unpin them. */
1721 if (xen_feature(XENFEAT_auto_translated_physmap))
1722 return;
1723
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001724 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001725 BUG();
1726}
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001727static void set_page_prot(void *addr, pgprot_t prot)
1728{
1729 return set_page_prot_flags(addr, prot, UVMF_NONE);
1730}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001731#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001732static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001733{
1734 unsigned pmdidx, pteidx;
1735 unsigned ident_pte;
1736 unsigned long pfn;
1737
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001738 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1739 PAGE_SIZE);
1740
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001741 ident_pte = 0;
1742 pfn = 0;
1743 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1744 pte_t *pte_page;
1745
1746 /* Reuse or allocate a page of ptes */
1747 if (pmd_present(pmd[pmdidx]))
1748 pte_page = m2v(pmd[pmdidx].pmd);
1749 else {
1750 /* Check for free pte pages */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001751 if (ident_pte == LEVEL1_IDENT_ENTRIES)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001752 break;
1753
1754 pte_page = &level1_ident_pgt[ident_pte];
1755 ident_pte += PTRS_PER_PTE;
1756
1757 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1758 }
1759
1760 /* Install mappings */
1761 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1762 pte_t pte;
1763
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001764#ifdef CONFIG_X86_32
1765 if (pfn > max_pfn_mapped)
1766 max_pfn_mapped = pfn;
1767#endif
1768
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001769 if (!pte_none(pte_page[pteidx]))
1770 continue;
1771
1772 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1773 pte_page[pteidx] = pte;
1774 }
1775 }
1776
1777 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1778 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1779
1780 set_page_prot(pmd, PAGE_KERNEL_RO);
1781}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001782#endif
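/*
 * Ask Xen where the machine-to-physical table is mapped; if the
 * hypercall is unavailable, fall back to the default size and location.
 */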
Ian Campbell7e775062010-09-30 12:37:26 +01001783void __init xen_setup_machphys_mapping(void)
1784{
1785 struct xen_machphys_mapping mapping;
Ian Campbell7e775062010-09-30 12:37:26 +01001786
1787 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1788 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
Jan Beulichccbcdf72011-08-16 15:07:41 +01001789 machine_to_phys_nr = mapping.max_mfn + 1;
Ian Campbell7e775062010-09-30 12:37:26 +01001790 } else {
Jan Beulichccbcdf72011-08-16 15:07:41 +01001791 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
Ian Campbell7e775062010-09-30 12:37:26 +01001792 }
Jan Beulichccbcdf72011-08-16 15:07:41 +01001793#ifdef CONFIG_X86_32
Jan Beulich61cca2f2011-09-15 08:52:40 +01001794 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1795 < machine_to_phys_mapping);
Jan Beulichccbcdf72011-08-16 15:07:41 +01001796#endif
Ian Campbell7e775062010-09-30 12:37:26 +01001797}
1798
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001799#ifdef CONFIG_X86_64
1800static void convert_pfn_mfn(void *v)
1801{
1802 pte_t *pte = v;
1803 int i;
1804
1805 /* All levels are converted the same way, so just treat them
1806 as ptes. */
1807 for (i = 0; i < PTRS_PER_PTE; i++)
1808 pte[i] = xen_make_pte(pte[i].pte);
1809}
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001810static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1811 unsigned long addr)
1812{
1813 if (*pt_base == PFN_DOWN(__pa(addr))) {
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001814 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001815 clear_page((void *)addr);
1816 (*pt_base)++;
1817 }
1818 if (*pt_end == PFN_DOWN(__pa(addr))) {
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001819 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001820 clear_page((void *)addr);
1821 (*pt_end)--;
1822 }
1823}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001824/*
Lucas De Marchi0d2eb442011-03-17 16:24:16 -03001825 * Set up the initial kernel pagetable.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001826 *
1827 * We can construct this by grafting the Xen provided pagetable into
1828 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
Stefan Bader0b5a5062014-09-02 11:16:01 +01001829 * level2_ident_pgt, and level2_kernel_pgt. This means that only the
1830 * kernel has a physical mapping to start with - but that's enough to
1831 * get __va working. We need to fill in the rest of the physical
1832 * mapping once some sort of allocator has been set up. NOTE: for
1833 * PVH, the page tables are native.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001834 */
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04001835void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001836{
1837 pud_t *l3;
1838 pmd_t *l2;
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001839 unsigned long addr[3];
1840 unsigned long pt_base, pt_end;
1841 unsigned i;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001842
Stefano Stabellini14988a42011-02-18 11:32:40 +00001843 /* max_pfn_mapped is the last pfn mapped in the initial memory
1844 * mappings. Considering that on Xen after the kernel mappings we
1845 * have the mappings of some pages that don't exist in pfn space, we
1846 * set max_pfn_mapped to the last real pfn mapped. */
1847 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1848
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001849 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1850 pt_end = pt_base + xen_start_info->nr_pt_frames;
1851
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001852 /* Zap identity mapping */
1853 init_level4_pgt[0] = __pgd(0);
1854
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001855 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1856 /* Pre-constructed entries are in pfn, so convert to mfn */
1857 /* L4[272] -> level3_ident_pgt
1858 * L4[511] -> level3_kernel_pgt */
1859 convert_pfn_mfn(init_level4_pgt);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001860
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001861 /* L3_i[0] -> level2_ident_pgt */
1862 convert_pfn_mfn(level3_ident_pgt);
1863 /* L3_k[510] -> level2_kernel_pgt
Stefan Bader0b5a5062014-09-02 11:16:01 +01001864 * L3_k[511] -> level2_fixmap_pgt */
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001865 convert_pfn_mfn(level3_kernel_pgt);
Stefan Bader0b5a5062014-09-02 11:16:01 +01001866
1867 /* L3_k[511][506] -> level1_fixmap_pgt */
1868 convert_pfn_mfn(level2_fixmap_pgt);
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001869 }
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001870 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001871 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1872 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1873
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001874 addr[0] = (unsigned long)pgd;
1875 addr[1] = (unsigned long)l3;
1876 addr[2] = (unsigned long)l2;
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001877	/* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
Stefan Bader0b5a5062014-09-02 11:16:01 +01001878 * Both L4[272][0] and L4[511][510] have entries that point to the same
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001879 * L2 (PMD) tables. Meaning that if you modify it in __va space
 1880	 * it will also be modified in the __ka space! (But if you just
1881 * modify the PMD table to point to other PTE's or none, then you
1882 * are OK - which is what cleanup_highmap does) */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001883 copy_page(level2_ident_pgt, l2);
Stefan Bader0b5a5062014-09-02 11:16:01 +01001884 /* Graft it onto L4[511][510] */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001885 copy_page(level2_kernel_pgt, l2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001886
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001887 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1888 /* Make pagetable pieces RO */
1889 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1890 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1891 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1892 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1893 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1894 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1895 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
Stefan Bader0b5a5062014-09-02 11:16:01 +01001896 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001897
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001898 /* Pin down new L4 */
1899 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1900 PFN_DOWN(__pa_symbol(init_level4_pgt)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001901
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001902 /* Unpin Xen-provided one */
1903 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001904
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001905 /*
1906 * At this stage there can be no user pgd, and no page
1907 * structure to attach it to, so make sure we just set kernel
1908 * pgd.
1909 */
1910 xen_mc_batch();
1911 __xen_write_cr3(true, __pa(init_level4_pgt));
1912 xen_mc_issue(PARAVIRT_LAZY_CPU);
1913 } else
1914 native_write_cr3(__pa(init_level4_pgt));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001915
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001916	/* We can't easily rip out L3 and L2, as the Xen pagetables are
1917 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
1918 * the initial domain. For guests using the toolstack, they are in:
 1919	 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
1920 * rip out the [L4] (pgd), but for guests we shave off three pages.
1921 */
1922 for (i = 0; i < ARRAY_SIZE(addr); i++)
1923 check_pt_base(&pt_base, &pt_end, addr[i]);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001924
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001925	/* Reserve our now smaller (by up to three pages) Xen pagetable */
1926 memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001927 /* Revector the xen_start_info */
1928 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001929}
1930#else /* !CONFIG_X86_64 */
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001931static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1932static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1933
Daniel Kiper3f5089532011-05-12 17:19:53 -04001934static void __init xen_write_cr3_init(unsigned long cr3)
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001935{
1936 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
1937
1938 BUG_ON(read_cr3() != __pa(initial_page_table));
1939 BUG_ON(cr3 != __pa(swapper_pg_dir));
1940
1941 /*
1942 * We are switching to swapper_pg_dir for the first time (from
1943 * initial_page_table) and therefore need to mark that page
1944 * read-only and then pin it.
1945 *
1946 * Xen disallows sharing of kernel PMDs for PAE
1947 * guests. Therefore we must copy the kernel PMD from
1948 * initial_page_table into a new kernel PMD to be used in
1949 * swapper_pg_dir.
1950 */
1951 swapper_kernel_pmd =
1952 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001953 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001954 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
1955 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
1956 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
1957
1958 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1959 xen_write_cr3(cr3);
1960 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
1961
1962 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
1963 PFN_DOWN(__pa(initial_page_table)));
1964 set_page_prot(initial_page_table, PAGE_KERNEL);
1965 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
1966
1967 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1968}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001969
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04001970void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001971{
1972 pmd_t *kernel_pmd;
1973
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001974 initial_kernel_pmd =
1975 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Jeremy Fitzhardingef0991802010-08-26 16:16:28 -07001976
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001977 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
1978 xen_start_info->nr_pt_frames * PAGE_SIZE +
1979 512*1024);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001980
1981 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001982 copy_page(initial_kernel_pmd, kernel_pmd);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001983
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001984 xen_map_identity_early(initial_kernel_pmd, max_pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001985
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001986 copy_page(initial_page_table, pgd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001987 initial_page_table[KERNEL_PGD_BOUNDARY] =
1988 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001989
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001990 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
1991 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001992 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
1993
1994 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1995
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001996 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
1997 PFN_DOWN(__pa(initial_page_table)));
1998 xen_write_cr3(__pa(initial_page_table));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001999
Tejun Heo24aa0782011-07-12 11:16:06 +02002000 memblock_reserve(__pa(xen_start_info->pt_base),
Konrad Rzeszutek Wilkdc6821e2012-01-07 21:27:38 -05002001 xen_start_info->nr_pt_frames * PAGE_SIZE);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002002}
2003#endif /* CONFIG_X86_64 */
2004
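/*
 * Dummy page used to back the local/IO APIC fixmap slots, which must not
 * map real hardware under Xen; it is filled with 0xff in xen_init_mmu_ops().
 */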
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002005static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2006
Masami Hiramatsu3b3809a2009-04-09 10:55:33 -07002007static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002008{
2009 pte_t pte;
2010
2011 phys >>= PAGE_SHIFT;
2012
2013 switch (idx) {
2014 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
Kees Cook4eefbe72013-04-10 12:24:22 -07002015 case FIX_RO_IDT:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002016#ifdef CONFIG_X86_32
2017 case FIX_WP_TEST:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002018# ifdef CONFIG_HIGHMEM
2019 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2020# endif
2021#else
Andy Lutomirskif40c3302014-05-05 12:19:36 -07002022 case VSYSCALL_PAGE:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002023#endif
Jeremy Fitzhardinge3ecb1b72009-03-07 23:48:41 -08002024 case FIX_TEXT_POKE0:
2025 case FIX_TEXT_POKE1:
2026 /* All local page mappings */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002027 pte = pfn_pte(phys, prot);
2028 break;
2029
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002030#ifdef CONFIG_X86_LOCAL_APIC
2031 case FIX_APIC_BASE: /* maps dummy local APIC */
2032 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2033 break;
2034#endif
2035
2036#ifdef CONFIG_X86_IO_APIC
2037 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2038 /*
2039 * We just don't map the IO APIC - all access is via
2040 * hypercalls. Keep the address in the pte for reference.
2041 */
Konrad Rzeszutek Wilk27abd142012-04-16 13:53:40 -04002042 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002043 break;
2044#endif
2045
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002046 case FIX_PARAVIRT_BOOTMAP:
2047 /* This is an MFN, but it isn't an IO mapping from the
2048 IO domain */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002049 pte = mfn_pte(phys, prot);
2050 break;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002051
2052 default:
2053 /* By default, set_fixmap is used for hardware mappings */
David Vrabel7f2f8822014-01-08 14:01:01 +00002054 pte = mfn_pte(phys, prot);
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002055 break;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002056 }
2057
2058 __native_set_fixmap(idx, pte);
2059
2060#ifdef CONFIG_X86_64
2061 /* Replicate changes to map the vsyscall page into the user
2062 pagetable vsyscall mapping. */
Andy Lutomirskif40c3302014-05-05 12:19:36 -07002063 if (idx == VSYSCALL_PAGE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002064 unsigned long vaddr = __fix_to_virt(idx);
2065 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2066 }
2067#endif
2068}
2069
Daniel Kiper3f5089532011-05-12 17:19:53 -04002070static void __init xen_post_allocator_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002071{
Mukesh Rathor4e44e442013-12-31 12:41:27 -05002072 if (xen_feature(XENFEAT_auto_translated_physmap))
2073 return;
2074
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002075 pv_mmu_ops.set_pte = xen_set_pte;
2076 pv_mmu_ops.set_pmd = xen_set_pmd;
2077 pv_mmu_ops.set_pud = xen_set_pud;
2078#if PAGETABLE_LEVELS == 4
2079 pv_mmu_ops.set_pgd = xen_set_pgd;
2080#endif
2081
2082 /* This will work as long as patching hasn't happened yet
2083 (which it hasn't) */
2084 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2085 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2086 pv_mmu_ops.release_pte = xen_release_pte;
2087 pv_mmu_ops.release_pmd = xen_release_pmd;
2088#if PAGETABLE_LEVELS == 4
2089 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2090 pv_mmu_ops.release_pud = xen_release_pud;
2091#endif
2092
2093#ifdef CONFIG_X86_64
Konrad Rzeszutek Wilkd3eb2c82013-03-22 10:34:28 -04002094 pv_mmu_ops.write_cr3 = &xen_write_cr3;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002095 SetPagePinned(virt_to_page(level3_user_vsyscall));
2096#endif
2097 xen_mark_init_mm_pinned();
2098}
2099
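/* Leaving lazy MMU mode: flush any queued multicalls before returning
   to immediate operation. */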
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002100static void xen_leave_lazy_mmu(void)
2101{
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002102 preempt_disable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002103 xen_mc_flush();
2104 paravirt_leave_lazy_mmu();
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002105 preempt_enable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002106}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002107
Daniel Kiper3f5089532011-05-12 17:19:53 -04002108static const struct pv_mmu_ops xen_mmu_ops __initconst = {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002109 .read_cr2 = xen_read_cr2,
2110 .write_cr2 = xen_write_cr2,
2111
2112 .read_cr3 = xen_read_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002113 .write_cr3 = xen_write_cr3_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002114
2115 .flush_tlb_user = xen_flush_tlb,
2116 .flush_tlb_kernel = xen_flush_tlb,
2117 .flush_tlb_single = xen_flush_tlb_single,
2118 .flush_tlb_others = xen_flush_tlb_others,
2119
2120 .pte_update = paravirt_nop,
2121 .pte_update_defer = paravirt_nop,
2122
2123 .pgd_alloc = xen_pgd_alloc,
2124 .pgd_free = xen_pgd_free,
2125
2126 .alloc_pte = xen_alloc_pte_init,
2127 .release_pte = xen_release_pte_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002128 .alloc_pmd = xen_alloc_pmd_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002129 .release_pmd = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002130
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002131 .set_pte = xen_set_pte_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002132 .set_pte_at = xen_set_pte_at,
2133 .set_pmd = xen_set_pmd_hyper,
2134
2135 .ptep_modify_prot_start = __ptep_modify_prot_start,
2136 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2137
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002138 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2139 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002140
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002141 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2142 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002143
2144#ifdef CONFIG_X86_PAE
2145 .set_pte_atomic = xen_set_pte_atomic,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002146 .pte_clear = xen_pte_clear,
2147 .pmd_clear = xen_pmd_clear,
2148#endif /* CONFIG_X86_PAE */
2149 .set_pud = xen_set_pud_hyper,
2150
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002151 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2152 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002153
2154#if PAGETABLE_LEVELS == 4
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002155 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2156 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002157 .set_pgd = xen_set_pgd_hyper,
2158
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002159 .alloc_pud = xen_alloc_pmd_init,
2160 .release_pud = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002161#endif /* PAGETABLE_LEVELS == 4 */
2162
2163 .activate_mm = xen_activate_mm,
2164 .dup_mmap = xen_dup_mmap,
2165 .exit_mmap = xen_exit_mmap,
2166
2167 .lazy_mode = {
2168 .enter = paravirt_enter_lazy_mmu,
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002169 .leave = xen_leave_lazy_mmu,
Boris Ostrovsky511ba862013-03-23 09:36:36 -04002170 .flush = paravirt_flush_lazy_mmu,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002171 },
2172
2173 .set_fixmap = xen_set_fixmap,
2174};
2175
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002176void __init xen_init_mmu_ops(void)
2177{
Attilio Rao7737b212012-08-21 21:22:38 +01002178 x86_init.paging.pagetable_init = xen_pagetable_init;
Mukesh Rathor76bccef2014-01-03 09:48:08 -05002179
 2180	/* Optimization - we could use the HVM variant, but it has no idea which
 2181	 * VCPUs are descheduled - which means that it will needlessly IPI
 2182	 * them. Xen knows, so let it do the job.
2183 */
2184 if (xen_feature(XENFEAT_auto_translated_physmap)) {
2185 pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
2186 return;
2187 }
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002188 pv_mmu_ops = xen_mmu_ops;
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -07002189
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002190 memset(dummy_mapping, 0xff, PAGE_SIZE);
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002191}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002192
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002193/* Protected by xen_reservation_lock. */
2194#define MAX_CONTIG_ORDER 9 /* 2MB */
2195static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2196
2197#define VOID_PTE (mfn_pte(0, __pgprot(0)))
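/*
 * Clear the PTEs backing a 2^order page range, optionally recording the
 * old MFNs (in_frames) and the PFNs (out_frames), and mark each PFN as
 * INVALID_P2M_ENTRY in the p2m.
 */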
2198static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2199 unsigned long *in_frames,
2200 unsigned long *out_frames)
2201{
2202 int i;
2203 struct multicall_space mcs;
2204
2205 xen_mc_batch();
2206 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2207 mcs = __xen_mc_entry(0);
2208
2209 if (in_frames)
2210 in_frames[i] = virt_to_mfn(vaddr);
2211
2212 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
Konrad Rzeszutek Wilk6eaa4122011-01-18 20:09:41 -05002213 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002214
2215 if (out_frames)
2216 out_frames[i] = virt_to_pfn(vaddr);
2217 }
2218 xen_mc_issue(0);
2219}
2220
2221/*
2222 * Update the pfn-to-mfn mappings for a virtual address range, either to
2223 * point to an array of mfns, or contiguously from a single starting
2224 * mfn.
2225 */
2226static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2227 unsigned long *mfns,
2228 unsigned long first_mfn)
2229{
2230 unsigned i, limit;
2231 unsigned long mfn;
2232
2233 xen_mc_batch();
2234
2235 limit = 1u << order;
2236 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2237 struct multicall_space mcs;
2238 unsigned flags;
2239
2240 mcs = __xen_mc_entry(0);
2241 if (mfns)
2242 mfn = mfns[i];
2243 else
2244 mfn = first_mfn + i;
2245
2246 if (i < (limit - 1))
2247 flags = 0;
2248 else {
2249 if (order == 0)
2250 flags = UVMF_INVLPG | UVMF_ALL;
2251 else
2252 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2253 }
2254
2255 MULTI_update_va_mapping(mcs.mc, vaddr,
2256 mfn_pte(mfn, PAGE_KERNEL), flags);
2257
2258 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2259 }
2260
2261 xen_mc_issue(0);
2262}
2263
2264/*
2265 * Perform the hypercall to exchange a region of our pfns to point to
2266 * memory with the required contiguous alignment. Takes the pfns as
2267 * input, and populates mfns as output.
2268 *
2269 * Returns a success code indicating whether the hypervisor was able to
2270 * satisfy the request or not.
2271 */
2272static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2273 unsigned long *pfns_in,
2274 unsigned long extents_out,
2275 unsigned int order_out,
2276 unsigned long *mfns_out,
2277 unsigned int address_bits)
2278{
2279 long rc;
2280 int success;
2281
2282 struct xen_memory_exchange exchange = {
2283 .in = {
2284 .nr_extents = extents_in,
2285 .extent_order = order_in,
2286 .extent_start = pfns_in,
2287 .domid = DOMID_SELF
2288 },
2289 .out = {
2290 .nr_extents = extents_out,
2291 .extent_order = order_out,
2292 .extent_start = mfns_out,
2293 .address_bits = address_bits,
2294 .domid = DOMID_SELF
2295 }
2296 };
2297
2298 BUG_ON(extents_in << order_in != extents_out << order_out);
2299
2300 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2301 success = (exchange.nr_exchanged == extents_in);
2302
2303 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2304 BUG_ON(success && (rc != 0));
2305
2306 return success;
2307}
2308
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002309int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
Stefano Stabellini69908902013-10-09 16:56:32 +00002310 unsigned int address_bits,
2311 dma_addr_t *dma_handle)
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002312{
2313 unsigned long *in_frames = discontig_frames, out_frame;
2314 unsigned long flags;
2315 int success;
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002316 unsigned long vstart = (unsigned long)phys_to_virt(pstart);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002317
2318 /*
2319 * Currently an auto-translated guest will not perform I/O, nor will
2320 * it require PAE page directories below 4GB. Therefore any calls to
2321 * this function are redundant and can be ignored.
2322 */
2323
2324 if (xen_feature(XENFEAT_auto_translated_physmap))
2325 return 0;
2326
2327 if (unlikely(order > MAX_CONTIG_ORDER))
2328 return -ENOMEM;
2329
2330 memset((void *) vstart, 0, PAGE_SIZE << order);
2331
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002332 spin_lock_irqsave(&xen_reservation_lock, flags);
2333
2334 /* 1. Zap current PTEs, remembering MFNs. */
2335 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2336
2337 /* 2. Get a new contiguous memory extent. */
2338 out_frame = virt_to_pfn(vstart);
2339 success = xen_exchange_memory(1UL << order, 0, in_frames,
2340 1, order, &out_frame,
2341 address_bits);
2342
2343 /* 3. Map the new extent in place of old pages. */
2344 if (success)
2345 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2346 else
2347 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2348
2349 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2350
Stefano Stabellini69908902013-10-09 16:56:32 +00002351 *dma_handle = virt_to_machine(vstart).maddr;
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002352 return success ? 0 : -ENOMEM;
2353}
2354EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2355
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002356void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002357{
2358 unsigned long *out_frames = discontig_frames, in_frame;
2359 unsigned long flags;
2360 int success;
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002361 unsigned long vstart;
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002362
2363 if (xen_feature(XENFEAT_auto_translated_physmap))
2364 return;
2365
2366 if (unlikely(order > MAX_CONTIG_ORDER))
2367 return;
2368
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002369 vstart = (unsigned long)phys_to_virt(pstart);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002370 memset((void *) vstart, 0, PAGE_SIZE << order);
2371
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002372 spin_lock_irqsave(&xen_reservation_lock, flags);
2373
2374 /* 1. Find start MFN of contiguous extent. */
2375 in_frame = virt_to_mfn(vstart);
2376
2377 /* 2. Zap current PTEs. */
2378 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2379
2380 /* 3. Do the exchange for non-contiguous MFNs. */
2381 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2382 0, out_frames, 0);
2383
2384 /* 4. Map new pages in place of old pages. */
2385 if (success)
2386 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2387 else
2388 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2389
2390 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2391}
2392EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
2393
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002394#ifdef CONFIG_XEN_PVHVM
Olaf Hering34b6f012012-10-01 21:18:01 +02002395#ifdef CONFIG_PROC_VMCORE
2396/*
2397 * This function is used in two contexts:
2398 * - the kdump kernel has to check whether a pfn of the crashed kernel
2399 * was a ballooned page. vmcore is using this function to decide
2400 * whether to access a pfn of the crashed kernel.
2401 * - the kexec kernel has to check whether a pfn was ballooned by the
2402 * previous kernel. If the pfn is ballooned, handle it properly.
2403 * Returns 0 if the pfn is not backed by a RAM page, the caller may
2404 * handle the pfn special in this case.
2405 */
2406static int xen_oldmem_pfn_is_ram(unsigned long pfn)
2407{
2408 struct xen_hvm_get_mem_type a = {
2409 .domid = DOMID_SELF,
2410 .pfn = pfn,
2411 };
2412 int ram;
2413
2414 if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
2415 return -ENXIO;
2416
2417 switch (a.mem_type) {
2418 case HVMMEM_mmio_dm:
2419 ram = 0;
2420 break;
2421 case HVMMEM_ram_rw:
2422 case HVMMEM_ram_ro:
2423 default:
2424 ram = 1;
2425 break;
2426 }
2427
2428 return ram;
2429}
2430#endif
2431
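/* Notify the hypervisor (HVMOP_pagetable_dying) that this pagetable is
   being torn down so it can drop any shadow state it keeps for it. */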
Stefano Stabellini59151002010-06-17 14:22:52 +01002432static void xen_hvm_exit_mmap(struct mm_struct *mm)
2433{
2434 struct xen_hvm_pagetable_dying a;
2435 int rc;
2436
2437 a.domid = DOMID_SELF;
2438 a.gpa = __pa(mm->pgd);
2439 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2440 WARN_ON_ONCE(rc < 0);
2441}
2442
2443static int is_pagetable_dying_supported(void)
2444{
2445 struct xen_hvm_pagetable_dying a;
2446 int rc = 0;
2447
2448 a.domid = DOMID_SELF;
2449 a.gpa = 0x00;
2450 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2451 if (rc < 0) {
2452 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2453 return 0;
2454 }
2455 return 1;
2456}
2457
2458void __init xen_hvm_init_mmu_ops(void)
2459{
2460 if (is_pagetable_dying_supported())
2461 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
Olaf Hering34b6f012012-10-01 21:18:01 +02002462#ifdef CONFIG_PROC_VMCORE
2463 register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
2464#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002465}
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002466#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002467
Mukesh Rathor77945ca2014-05-23 19:33:44 -07002468#ifdef CONFIG_XEN_PVH
2469/*
 2470 * Map a foreign gfn (fgfn) to a local pfn (lpfn). This is for user
 2471 * space creating a new guest on a PVH dom0 and needing to map domU pages.
2472 */
2473static int xlate_add_to_p2m(unsigned long lpfn, unsigned long fgfn,
2474 unsigned int domid)
2475{
2476 int rc, err = 0;
2477 xen_pfn_t gpfn = lpfn;
2478 xen_ulong_t idx = fgfn;
2479
2480 struct xen_add_to_physmap_range xatp = {
2481 .domid = DOMID_SELF,
2482 .foreign_domid = domid,
2483 .size = 1,
2484 .space = XENMAPSPACE_gmfn_foreign,
2485 };
2486 set_xen_guest_handle(xatp.idxs, &idx);
2487 set_xen_guest_handle(xatp.gpfns, &gpfn);
2488 set_xen_guest_handle(xatp.errs, &err);
2489
2490 rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
2491 if (rc < 0)
2492 return rc;
2493 return err;
2494}
2495
2496static int xlate_remove_from_p2m(unsigned long spfn, int count)
2497{
2498 struct xen_remove_from_physmap xrp;
2499 int i, rc;
2500
2501 for (i = 0; i < count; i++) {
2502 xrp.domid = DOMID_SELF;
2503 xrp.gpfn = spfn+i;
2504 rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
2505 if (rc)
2506 break;
2507 }
2508 return rc;
2509}
2510
2511struct xlate_remap_data {
2512 unsigned long fgfn; /* foreign domain's gfn */
2513 pgprot_t prot;
2514 domid_t domid;
2515 int index;
2516 struct page **pages;
2517};
2518
2519static int xlate_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
2520 void *data)
2521{
2522 int rc;
2523 struct xlate_remap_data *remap = data;
2524 unsigned long pfn = page_to_pfn(remap->pages[remap->index++]);
2525 pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot));
2526
2527 rc = xlate_add_to_p2m(pfn, remap->fgfn, remap->domid);
2528 if (rc)
2529 return rc;
2530 native_set_pte(ptep, pteval);
2531
2532 return 0;
2533}
2534
2535static int xlate_remap_gfn_range(struct vm_area_struct *vma,
2536 unsigned long addr, unsigned long mfn,
2537 int nr, pgprot_t prot, unsigned domid,
2538 struct page **pages)
2539{
2540 int err;
2541 struct xlate_remap_data pvhdata;
2542
2543 BUG_ON(!pages);
2544
2545 pvhdata.fgfn = mfn;
2546 pvhdata.prot = prot;
2547 pvhdata.domid = domid;
2548 pvhdata.index = 0;
2549 pvhdata.pages = pages;
2550 err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
2551 xlate_map_pte_fn, &pvhdata);
2552 flush_tlb_all();
2553 return err;
2554}
2555#endif
2556
Ian Campbellde1ef202009-05-21 10:09:46 +01002557#define REMAP_BATCH_SIZE 16
2558
2559struct remap_data {
2560 unsigned long mfn;
2561 pgprot_t prot;
2562 struct mmu_update *mmu_update;
2563};
2564
2565static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2566 unsigned long addr, void *data)
2567{
2568 struct remap_data *rmd = data;
David Vrabelf59c5142014-01-08 14:00:01 +00002569 pte_t pte = pte_mkspecial(mfn_pte(rmd->mfn++, rmd->prot));
Ian Campbellde1ef202009-05-21 10:09:46 +01002570
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -08002571 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
Ian Campbellde1ef202009-05-21 10:09:46 +01002572 rmd->mmu_update->val = pte_val_ma(pte);
2573 rmd->mmu_update++;
2574
2575 return 0;
2576}
2577
2578int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2579 unsigned long addr,
Ian Campbell7892f692012-10-16 17:19:15 +01002580 xen_pfn_t mfn, int nr,
Ian Campbell9a032e32012-10-17 13:37:49 -07002581 pgprot_t prot, unsigned domid,
2582 struct page **pages)
2583
Ian Campbellde1ef202009-05-21 10:09:46 +01002584{
2585 struct remap_data rmd;
2586 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2587 int batch;
2588 unsigned long range;
2589 int err = 0;
2590
Konstantin Khlebnikov314e51b2012-10-08 16:29:02 -07002591 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
Ian Campbellde1ef202009-05-21 10:09:46 +01002592
Mukesh Rathor77945ca2014-05-23 19:33:44 -07002593 if (xen_feature(XENFEAT_auto_translated_physmap)) {
2594#ifdef CONFIG_XEN_PVH
2595		/* We need to update the local page tables and the Xen HAP */
2596 return xlate_remap_gfn_range(vma, addr, mfn, nr, prot,
2597 domid, pages);
2598#else
2599 return -EINVAL;
2600#endif
2601 }
2602
Ian Campbellde1ef202009-05-21 10:09:46 +01002603 rmd.mfn = mfn;
2604 rmd.prot = prot;
2605
2606 while (nr) {
2607 batch = min(REMAP_BATCH_SIZE, nr);
2608 range = (unsigned long)batch << PAGE_SHIFT;
2609
2610 rmd.mmu_update = mmu_update;
2611 err = apply_to_page_range(vma->vm_mm, addr, range,
2612 remap_area_mfn_pte_fn, &rmd);
2613 if (err)
2614 goto out;
2615
David Vrabel69870a82012-08-30 13:58:11 +01002616 err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
2617 if (err < 0)
Ian Campbellde1ef202009-05-21 10:09:46 +01002618 goto out;
2619
2620 nr -= batch;
2621 addr += range;
2622 }
2623
2624 err = 0;
2625out:
2626
Konrad Rzeszutek Wilk95a7d762012-10-31 12:38:31 -04002627 xen_flush_tlb_all();
Ian Campbellde1ef202009-05-21 10:09:46 +01002628
2629 return err;
2630}
2631EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
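/*
 * Hedged usage sketch: a caller such as an mmap() handler in a Xen
 * userspace-interface driver would mark the vma VM_IO | VM_PFNMAP (and,
 * typically, VM_DONTCOPY | VM_DONTEXPAND) before asking for a contiguous
 * range of machine frames to be mapped into it. The names below are
 * illustrative, not a real driver.
 */
#if 0
static int example_remap_mfns(struct vm_area_struct *vma, xen_pfn_t first_mfn,
			      int nr_pages, domid_t domid)
{
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY | VM_DONTEXPAND;

	/* PV guests ignore @pages; auto-translated guests require it. */
	return xen_remap_domain_mfn_range(vma, vma->vm_start, first_mfn,
					  nr_pages, vma->vm_page_prot,
					  domid, NULL);
}
#endif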
Ian Campbell9a032e32012-10-17 13:37:49 -07002632
2633/* Returns: 0 success */
2634int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
2635 int numpgs, struct page **pages)
2636{
2637 if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
2638 return 0;
2639
Mukesh Rathor77945ca2014-05-23 19:33:44 -07002640#ifdef CONFIG_XEN_PVH
2641 while (numpgs--) {
2642 /*
2643 * The mmu has already cleaned up the process mmu
2644 * resources at this point (lookup_address will return
2645 * NULL).
2646 */
2647 unsigned long pfn = page_to_pfn(pages[numpgs]);
2648
2649 xlate_remove_from_p2m(pfn, 1);
2650 }
2651 /*
2652	 * We don't need to flush TLBs here because, as part of
2653	 * xlate_remove_from_p2m(), the hypervisor does the TLB flushes
2654	 * after removing the p2m entries from the EPT/NPT.
2655 */
2656 return 0;
2657#else
Ian Campbell9a032e32012-10-17 13:37:49 -07002658 return -EINVAL;
Mukesh Rathor77945ca2014-05-23 19:33:44 -07002659#endif
Ian Campbell9a032e32012-10-17 13:37:49 -07002660}
2661EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
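/*
 * Hedged sketch of the matching teardown path for an auto-translated guest:
 * the same pages array that backed the mapping is handed back so the p2m
 * entries can be removed once the vma goes away. Illustrative only.
 */
#if 0
static void example_teardown_mapping(struct vm_area_struct *vma,
				     struct page **pages, int nr_pages)
{
	if (xen_unmap_domain_mfn_range(vma, nr_pages, pages))
		pr_warn("example: failed to unmap foreign frames\n");
}
#endif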