Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001/*
2 * Xen mmu operations
3 *
4 * This file contains the various mmu fetch and update operations.
5 * The most important job they must perform is the mapping between the
6 * domain's pfn and the overall machine mfns.
7 *
8 * Xen allows guests to directly update the pagetable, in a controlled
9 * fashion. In other words, the guest modifies the same pagetable
10 * that the CPU actually uses, which eliminates the overhead of having
11 * a separate shadow pagetable.
12 *
13 * In order to allow this, it falls on the guest domain to map its
14 * notion of a "physical" pfn - which is just a domain-local linear
15 * address - into a real "machine address" which the CPU's MMU can
16 * use.
17 *
18 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19 * inserted directly into the pagetable. When creating a new
20 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
21 * when reading the content back with __(pgd|pmd|pte)_val, it converts
22 * the mfn back into a pfn.
23 *
24 * The other constraint is that all pages which make up a pagetable
25 * must be mapped read-only in the guest. This prevents uncontrolled
26 * guest updates to the pagetable. Xen strictly enforces this, and
27 * will disallow any pagetable update which will end up mapping a
28 * pagetable page RW, and will disallow using any writable page as a
29 * pagetable.
30 *
31 * Naively, when loading %cr3 with the base of a new pagetable, Xen
32 * would need to validate the whole pagetable before going on.
33 * Naturally, this is quite slow. The solution is to "pin" a
34 * pagetable, which enforces all the constraints on the pagetable even
 35 * when it is not actively in use. This means that Xen can be assured
 36 * that it is still valid when you load it into %cr3, and doesn't
37 * need to revalidate it.
38 *
39 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
40 */
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -070041#include <linux/sched.h>
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070042#include <linux/highmem.h>
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -070043#include <linux/debugfs.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070044#include <linux/bug.h>
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -070045#include <linux/vmalloc.h>
Randy Dunlap44408ad2009-05-12 13:31:40 -070046#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090047#include <linux/gfp.h>
Yinghai Lua9ce6bc2010-08-25 13:39:17 -070048#include <linux/memblock.h>
Konrad Rzeszutek Wilk2222e712010-12-22 08:57:30 -050049#include <linux/seq_file.h>
Olaf Hering34b6f012012-10-01 21:18:01 +020050#include <linux/crash_dump.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070051
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -080052#include <trace/events/xen.h>
53
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070054#include <asm/pgtable.h>
55#include <asm/tlbflush.h>
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -070056#include <asm/fixmap.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070057#include <asm/mmu_context.h>
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080058#include <asm/setup.h>
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070059#include <asm/paravirt.h>
Alex Nixon7347b402010-02-19 13:31:06 -050060#include <asm/e820.h>
Jeremy Fitzhardingecbcd79c2008-07-08 15:06:27 -070061#include <asm/linkage.h>
Alex Nixon08bbc9d2009-02-09 12:05:46 -080062#include <asm/page.h>
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -070063#include <asm/init.h>
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -070064#include <asm/pat.h>
Andrew Jones900cba82009-12-18 10:31:31 +010065#include <asm/smp.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070066
67#include <asm/xen/hypercall.h>
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070068#include <asm/xen/hypervisor.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070069
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -080070#include <xen/xen.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070071#include <xen/page.h>
72#include <xen/interface/xen.h>
Stefano Stabellini59151002010-06-17 14:22:52 +010073#include <xen/interface/hvm/hvm_op.h>
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080074#include <xen/interface/version.h>
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -080075#include <xen/interface/memory.h>
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080076#include <xen/hvc-console.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070077
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070078#include "multicalls.h"
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070079#include "mmu.h"
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -070080#include "debugfs.h"
81
Alex Nixon19001c82009-02-09 12:05:46 -080082/*
83 * Protects atomic reservation decrease/increase against concurrent increases.
Daniel Kiper06f521d2011-03-08 22:45:46 +010084 * Also protects non-atomic updates of current_pages and balloon lists.
Alex Nixon19001c82009-02-09 12:05:46 -080085 */
86DEFINE_SPINLOCK(xen_reservation_lock);
87
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -040088#ifdef CONFIG_X86_32
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080089/*
90 * Identity map, in addition to plain kernel map. This needs to be
 91 * large enough to allocate the page table pages needed to map the rest.
92 * Each page can map 2MB.
93 */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -070094#define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
95static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -040096#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080097#ifdef CONFIG_X86_64
98/* l3 pud for userspace vsyscall mapping */
99static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
100#endif /* CONFIG_X86_64 */
101
102/*
103 * Note about cr3 (pagetable base) values:
104 *
105 * xen_cr3 contains the current logical cr3 value; it contains the
106 * last set cr3. This may not be the current effective cr3, because
107 * its update may be being lazily deferred. However, a vcpu looking
 108 * at its own cr3 can use this value knowing that everything will
109 * be self-consistent.
110 *
111 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
112 * hypercall to set the vcpu cr3 is complete (so it may be a little
113 * out of date, but it will never be set early). If one vcpu is
114 * looking at another vcpu's cr3 value, it should use this variable.
115 */
116DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
117DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
118
119
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700120/*
121 * Just beyond the highest usermode address. STACK_TOP_MAX has a
122 * redzone above it, so round it up to a PGD boundary.
123 */
124#define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
125
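/* Return the machine frame number backing the given kernel virtual address. */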
Jeremy Fitzhardinge9976b392009-02-27 09:19:26 -0800126unsigned long arbitrary_virt_to_mfn(void *vaddr)
127{
128 xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
129
130 return PFN_DOWN(maddr.maddr);
131}
132
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700133xmaddr_t arbitrary_virt_to_machine(void *vaddr)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700134{
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700135 unsigned long address = (unsigned long)vaddr;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +0100136 unsigned int level;
Chris Lalancette9f32d212008-10-23 17:40:25 -0700137 pte_t *pte;
138 unsigned offset;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700139
Chris Lalancette9f32d212008-10-23 17:40:25 -0700140 /*
141 * if the PFN is in the linear mapped vaddr range, we can just use
142 * the (quick) virt_to_machine() p2m lookup
143 */
144 if (virt_addr_valid(vaddr))
145 return virt_to_machine(vaddr);
146
147 /* otherwise we have to do a (slower) full page-table walk */
148
149 pte = lookup_address(address, &level);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700150 BUG_ON(pte == NULL);
Chris Lalancette9f32d212008-10-23 17:40:25 -0700151 offset = address & ~PAGE_MASK;
Jeremy Fitzhardingeebd879e2008-07-08 15:06:54 -0700152 return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700153}
Stephen Rothwellde23be52011-01-15 10:36:26 +1100154EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700155
156void make_lowmem_page_readonly(void *vaddr)
157{
158 pte_t *pte, ptev;
159 unsigned long address = (unsigned long)vaddr;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +0100160 unsigned int level;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700161
Ingo Molnarf0646e42008-01-30 13:33:43 +0100162 pte = lookup_address(address, &level);
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -0700163 if (pte == NULL)
164 return; /* vaddr missing */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700165
166 ptev = pte_wrprotect(*pte);
167
168 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
169 BUG();
170}
171
172void make_lowmem_page_readwrite(void *vaddr)
173{
174 pte_t *pte, ptev;
175 unsigned long address = (unsigned long)vaddr;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +0100176 unsigned int level;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700177
Ingo Molnarf0646e42008-01-30 13:33:43 +0100178 pte = lookup_address(address, &level);
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -0700179 if (pte == NULL)
180 return; /* vaddr missing */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700181
182 ptev = pte_mkwrite(*pte);
183
184 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
185 BUG();
186}
187
188
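/* Is the page containing ptr part of a pinned pagetable? */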
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700189static bool xen_page_pinned(void *ptr)
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100190{
191 struct page *page = virt_to_page(ptr);
192
193 return PagePinned(page);
194}
195
Jeremy Fitzhardingeeba3ff82009-02-09 12:05:49 -0800196void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800197{
198 struct multicall_space mcs;
199 struct mmu_update *u;
200
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800201 trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
202
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800203 mcs = xen_mc_entry(sizeof(*u));
204 u = mcs.args;
205
206 /* ptep might be kmapped when using 32-bit HIGHPTE */
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -0800207 u->ptr = virt_to_machine(ptep).maddr;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800208 u->val = pte_val_ma(pteval);
209
Jeremy Fitzhardingeeba3ff82009-02-09 12:05:49 -0800210 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800211
212 xen_mc_issue(PARAVIRT_LAZY_MMU);
213}
Jeremy Fitzhardingeeba3ff82009-02-09 12:05:49 -0800214EXPORT_SYMBOL_GPL(xen_set_domain_pte);
215
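/*
 * Add an mmu_update request to the pending multicall batch, extending the
 * previous mmu_update multicall with another argument when possible.
 */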
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700216static void xen_extend_mmu_update(const struct mmu_update *update)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700217{
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700218 struct multicall_space mcs;
219 struct mmu_update *u;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700220
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700221 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
222
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700223 if (mcs.mc != NULL) {
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700224 mcs.mc->args[1]++;
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700225 } else {
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700226 mcs = __xen_mc_entry(sizeof(*u));
227 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
228 }
229
230 u = mcs.args;
231 *u = *update;
232}
233
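/* As xen_extend_mmu_update(), but for mmuext_op requests. */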
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -0800234static void xen_extend_mmuext_op(const struct mmuext_op *op)
235{
236 struct multicall_space mcs;
237 struct mmuext_op *u;
238
239 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
240
241 if (mcs.mc != NULL) {
242 mcs.mc->args[1]++;
243 } else {
244 mcs = __xen_mc_entry(sizeof(*u));
245 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
246 }
247
248 u = mcs.args;
249 *u = *op;
250}
251
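/* Set a pmd entry via a batched mmu_update hypercall rather than a direct write. */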
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800252static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700253{
254 struct mmu_update u;
255
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700256 preempt_disable();
257
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700258 xen_mc_batch();
259
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700260 /* ptr may be ioremapped for 64-bit pagetable setup */
261 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700262 u.val = pmd_val_ma(val);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700263 xen_extend_mmu_update(&u);
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700264
265 xen_mc_issue(PARAVIRT_LAZY_MMU);
266
267 preempt_enable();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700268}
269
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800270static void xen_set_pmd(pmd_t *ptr, pmd_t val)
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100271{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800272 trace_xen_mmu_set_pmd(ptr, val);
273
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100274 /* If page is not pinned, we can just update the entry
275 directly */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700276 if (!xen_page_pinned(ptr)) {
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100277 *ptr = val;
278 return;
279 }
280
281 xen_set_pmd_hyper(ptr, val);
282}
283
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700284/*
285 * Associate a virtual page frame with a given physical page frame
286 * and protection flags for that frame.
287 */
288void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
289{
Jeremy Fitzhardinge836fe2f2008-07-08 15:06:58 -0700290 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700291}
292
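/*
 * Try to queue a pte write as a deferred mmu_update. Returns false if we
 * are not in lazy MMU mode, in which case the caller must update the pte
 * itself.
 */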
Jeremy Fitzhardinge4a35c132010-12-01 15:30:41 -0800293static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
294{
295 struct mmu_update u;
296
297 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
298 return false;
299
300 xen_mc_batch();
301
302 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
303 u.val = pte_val_ma(pteval);
304 xen_extend_mmu_update(&u);
305
306 xen_mc_issue(PARAVIRT_LAZY_MMU);
307
308 return true;
309}
310
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800311static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
Jeremy Fitzhardinge4a35c132010-12-01 15:30:41 -0800312{
David Vrabeld095d432012-07-09 11:39:05 +0100313 if (!xen_batched_set_pte(ptep, pteval)) {
314 /*
315 * Could call native_set_pte() here and trap and
316 * emulate the PTE write but with 32-bit guests this
317 * needs two traps (one for each of the two 32-bit
318 * words in the PTE) so do one hypercall directly
319 * instead.
320 */
321 struct mmu_update u;
322
323 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
324 u.val = pte_val_ma(pteval);
325 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
326 }
Jeremy Fitzhardinge4a35c132010-12-01 15:30:41 -0800327}
328
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800329static void xen_set_pte(pte_t *ptep, pte_t pteval)
330{
331 trace_xen_mmu_set_pte(ptep, pteval);
332 __xen_set_pte(ptep, pteval);
333}
334
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800335static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700336 pte_t *ptep, pte_t pteval)
337{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800338 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
339 __xen_set_pte(ptep, pteval);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700340}
341
Tejf63c2f22008-12-16 11:56:06 -0800342pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
343 unsigned long addr, pte_t *ptep)
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700344{
345 /* Just return the pte as-is. We preserve the bits on commit */
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800346 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700347 return *ptep;
348}
349
350void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
351 pte_t *ptep, pte_t pte)
352{
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700353 struct mmu_update u;
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700354
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800355 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700356 xen_mc_batch();
357
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -0800358 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700359 u.val = pte_val_ma(pte);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700360 xen_extend_mmu_update(&u);
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700361
362 xen_mc_issue(PARAVIRT_LAZY_MMU);
363}
364
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700365/* Assume pteval_t is equivalent to all the other *val_t types. */
366static pteval_t pte_mfn_to_pfn(pteval_t val)
367{
David Vrabel5926f872014-03-25 10:38:37 +0000368 if (val & _PAGE_PRESENT) {
Jeremy Fitzhardinge59438c92008-07-21 22:59:42 -0700369 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
Konrad Rzeszutek Wilkb7e5ffe2012-05-03 16:14:14 -0400370 unsigned long pfn = mfn_to_pfn(mfn);
371
Jeremy Fitzhardinge77be1fa2008-07-21 22:59:56 -0700372 pteval_t flags = val & PTE_FLAGS_MASK;
Konrad Rzeszutek Wilkb7e5ffe2012-05-03 16:14:14 -0400373 if (unlikely(pfn == ~0))
374 val = flags & ~_PAGE_PRESENT;
375 else
376 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700377 }
378
379 return val;
380}
381
382static pteval_t pte_pfn_to_mfn(pteval_t val)
383{
David Vrabel5926f872014-03-25 10:38:37 +0000384 if (val & _PAGE_PRESENT) {
Jeremy Fitzhardinge59438c92008-07-21 22:59:42 -0700385 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
Jeremy Fitzhardinge77be1fa2008-07-21 22:59:56 -0700386 pteval_t flags = val & PTE_FLAGS_MASK;
Konrad Rzeszutek Wilkfb389232011-01-05 15:46:31 -0500387 unsigned long mfn;
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700388
Konrad Rzeszutek Wilkfb389232011-01-05 15:46:31 -0500389 if (!xen_feature(XENFEAT_auto_translated_physmap))
Juergen Gross0aad5682014-11-28 11:53:57 +0100390 mfn = __pfn_to_mfn(pfn);
Konrad Rzeszutek Wilkfb389232011-01-05 15:46:31 -0500391 else
392 mfn = pfn;
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700393 /*
394 * If there's no mfn for the pfn, then just create an
395 * empty non-present pte. Unfortunately this loses
396 * information about the original pfn, so
397 * pte_mfn_to_pfn is asymmetric.
398 */
399 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
400 mfn = 0;
401 flags = 0;
David Vrabel7f2f8822014-01-08 14:01:01 +0000402 } else
403 mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700404 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700405 }
406
407 return val;
408}
409
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700410__visible pteval_t xen_pte_val(pte_t pte)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700411{
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700412 pteval_t pteval = pte.pte;
Juergen Gross47591df2014-11-03 14:02:04 +0100413
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700414 return pte_mfn_to_pfn(pteval);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700415}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800416PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700417
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700418__visible pgdval_t xen_pgd_val(pgd_t pgd)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700419{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700420 return pte_mfn_to_pfn(pgd.pgd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700421}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800422PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700423
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700424__visible pte_t xen_make_pte(pteval_t pte)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700425{
David Vrabel7f2f8822014-01-08 14:01:01 +0000426 pte = pte_pfn_to_mfn(pte);
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800427
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700428 return native_make_pte(pte);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700429}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800430PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700431
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700432__visible pgd_t xen_make_pgd(pgdval_t pgd)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700433{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700434 pgd = pte_pfn_to_mfn(pgd);
435 return native_make_pgd(pgd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700436}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800437PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700438
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700439__visible pmdval_t xen_pmd_val(pmd_t pmd)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700440{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700441 return pte_mfn_to_pfn(pmd.pmd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700442}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800443PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
Jeremy Fitzhardinge28499142008-05-09 12:05:57 +0100444
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800445static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700446{
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700447 struct mmu_update u;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700448
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700449 preempt_disable();
450
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700451 xen_mc_batch();
452
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700453 /* ptr may be ioremapped for 64-bit pagetable setup */
454 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700455 u.val = pud_val_ma(val);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700456 xen_extend_mmu_update(&u);
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700457
458 xen_mc_issue(PARAVIRT_LAZY_MMU);
459
460 preempt_enable();
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700461}
462
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800463static void xen_set_pud(pud_t *ptr, pud_t val)
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100464{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800465 trace_xen_mmu_set_pud(ptr, val);
466
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100467 /* If page is not pinned, we can just update the entry
468 directly */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700469 if (!xen_page_pinned(ptr)) {
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100470 *ptr = val;
471 return;
472 }
473
474 xen_set_pud_hyper(ptr, val);
475}
476
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700477#ifdef CONFIG_X86_PAE
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800478static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700479{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800480 trace_xen_mmu_set_pte_atomic(ptep, pte);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700481 set_64bit((u64 *)ptep, native_pte_val(pte));
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700482}
483
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800484static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700485{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800486 trace_xen_mmu_pte_clear(mm, addr, ptep);
Jeremy Fitzhardinge4a35c132010-12-01 15:30:41 -0800487 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
488 native_pte_clear(mm, addr, ptep);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700489}
490
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800491static void xen_pmd_clear(pmd_t *pmdp)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700492{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800493 trace_xen_mmu_pmd_clear(pmdp);
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100494 set_pmd(pmdp, __pmd(0));
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700495}
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700496#endif /* CONFIG_X86_PAE */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700497
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700498__visible pmd_t xen_make_pmd(pmdval_t pmd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700499{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700500 pmd = pte_pfn_to_mfn(pmd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700501 return native_make_pmd(pmd);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700502}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800503PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700504
Kirill A. Shutemov98233362015-04-14 15:46:14 -0700505#if CONFIG_PGTABLE_LEVELS == 4
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700506__visible pudval_t xen_pud_val(pud_t pud)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700507{
508 return pte_mfn_to_pfn(pud.pud);
509}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800510PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700511
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700512__visible pud_t xen_make_pud(pudval_t pud)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700513{
514 pud = pte_pfn_to_mfn(pud);
515
516 return native_make_pud(pud);
517}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800518PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700519
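/*
 * A 64-bit kernel pgd may have a companion user pagetable stored in its
 * page's ->private. Return the corresponding entry in that user pagetable,
 * or NULL if the entry is at or above USER_LIMIT or no user pagetable is
 * attached.
 */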
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800520static pgd_t *xen_get_user_pgd(pgd_t *pgd)
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700521{
522 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
523 unsigned offset = pgd - pgd_page;
524 pgd_t *user_ptr = NULL;
525
526 if (offset < pgd_index(USER_LIMIT)) {
527 struct page *page = virt_to_page(pgd_page);
528 user_ptr = (pgd_t *)page->private;
529 if (user_ptr)
530 user_ptr += offset;
531 }
532
533 return user_ptr;
534}
535
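/* Queue a pgd entry update as an mmu_update; callers handle batching. */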
536static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700537{
538 struct mmu_update u;
539
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700540 u.ptr = virt_to_machine(ptr).maddr;
541 u.val = pgd_val_ma(val);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700542 xen_extend_mmu_update(&u);
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700543}
544
545/*
 546 * Raw hypercall-based set_pgd, intended for use in early boot before
547 * there's a page structure. This implies:
548 * 1. The only existing pagetable is the kernel's
549 * 2. It is always pinned
550 * 3. It has no user pagetable attached to it
551 */
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800552static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700553{
554 preempt_disable();
555
556 xen_mc_batch();
557
558 __xen_set_pgd_hyper(ptr, val);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700559
560 xen_mc_issue(PARAVIRT_LAZY_MMU);
561
562 preempt_enable();
563}
564
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800565static void xen_set_pgd(pgd_t *ptr, pgd_t val)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700566{
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700567 pgd_t *user_ptr = xen_get_user_pgd(ptr);
568
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800569 trace_xen_mmu_set_pgd(ptr, user_ptr, val);
570
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700571 /* If page is not pinned, we can just update the entry
572 directly */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700573 if (!xen_page_pinned(ptr)) {
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700574 *ptr = val;
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700575 if (user_ptr) {
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700576 WARN_ON(xen_page_pinned(user_ptr));
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700577 *user_ptr = val;
578 }
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700579 return;
580 }
581
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700582 /* If it's pinned, then we can at least batch the kernel and
583 user updates together. */
584 xen_mc_batch();
585
586 __xen_set_pgd_hyper(ptr, val);
587 if (user_ptr)
588 __xen_set_pgd_hyper(user_ptr, val);
589
590 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700591}
Kirill A. Shutemov98233362015-04-14 15:46:14 -0700592#endif /* CONFIG_PGTABLE_LEVELS == 4 */
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700593
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700594/*
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700595 * (Yet another) pagetable walker. This one is intended for pinning a
596 * pagetable. This means that it walks a pagetable and calls the
597 * callback function on each page it finds making up the page table,
598 * at every level. It walks the entire pagetable, but it only bothers
599 * pinning pte pages which are below limit. In the normal case this
600 * will be STACK_TOP_MAX, but at boot we need to pin up to
601 * FIXADDR_TOP.
602 *
603 * For 32-bit the important bit is that we don't pin beyond there,
604 * because then we start getting into Xen's ptes.
605 *
606 * For 64-bit, we must skip the Xen hole in the middle of the address
607 * space, just after the big x86-64 virtual hole.
608 */
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000609static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
610 int (*func)(struct mm_struct *mm, struct page *,
611 enum pt_level),
612 unsigned long limit)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700613{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700614 int flush = 0;
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700615 unsigned hole_low, hole_high;
616 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
617 unsigned pgdidx, pudidx, pmdidx;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700618
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700619 /* The limit is the last byte to be touched */
620 limit--;
621 BUG_ON(limit >= FIXADDR_TOP);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700622
623 if (xen_feature(XENFEAT_auto_translated_physmap))
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700624 return 0;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700625
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700626 /*
627 * 64-bit has a great big hole in the middle of the address
 628 * space, which contains the Xen mappings. On 32-bit this
629 * will end up making a zero-sized hole and so is a no-op.
630 */
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700631 hole_low = pgd_index(USER_LIMIT);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700632 hole_high = pgd_index(PAGE_OFFSET);
633
634 pgdidx_limit = pgd_index(limit);
635#if PTRS_PER_PUD > 1
636 pudidx_limit = pud_index(limit);
637#else
638 pudidx_limit = 0;
639#endif
640#if PTRS_PER_PMD > 1
641 pmdidx_limit = pmd_index(limit);
642#else
643 pmdidx_limit = 0;
644#endif
645
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700646 for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700647 pud_t *pud;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700648
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700649 if (pgdidx >= hole_low && pgdidx < hole_high)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700650 continue;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700651
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700652 if (!pgd_val(pgd[pgdidx]))
653 continue;
654
655 pud = pud_offset(&pgd[pgdidx], 0);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700656
657 if (PTRS_PER_PUD > 1) /* not folded */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700658 flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700659
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700660 for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700661 pmd_t *pmd;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700662
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700663 if (pgdidx == pgdidx_limit &&
664 pudidx > pudidx_limit)
665 goto out;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700666
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700667 if (pud_none(pud[pudidx]))
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700668 continue;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700669
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700670 pmd = pmd_offset(&pud[pudidx], 0);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700671
672 if (PTRS_PER_PMD > 1) /* not folded */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700673 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700674
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700675 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
676 struct page *pte;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700677
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700678 if (pgdidx == pgdidx_limit &&
679 pudidx == pudidx_limit &&
680 pmdidx > pmdidx_limit)
681 goto out;
682
683 if (pmd_none(pmd[pmdidx]))
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700684 continue;
685
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700686 pte = pmd_page(pmd[pmdidx]);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700687 flush |= (*func)(mm, pte, PT_PTE);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700688 }
689 }
690 }
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700691
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700692out:
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700693 /* Do the top level last, so that the callbacks can use it as
694 a cue to do final things like tlb flushes. */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700695 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700696
697 return flush;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700698}
699
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000700static int xen_pgd_walk(struct mm_struct *mm,
701 int (*func)(struct mm_struct *mm, struct page *,
702 enum pt_level),
703 unsigned long limit)
704{
705 return __xen_pgd_walk(mm, mm->pgd, func, limit);
706}
707
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700708/* If we're using split pte locks, then take the page's lock and
709 return a pointer to it. Otherwise return NULL. */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700710static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700711{
712 spinlock_t *ptl = NULL;
713
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -0800714#if USE_SPLIT_PTE_PTLOCKS
Kirill A. Shutemov49076ec2013-11-14 14:31:51 -0800715 ptl = ptlock_ptr(page);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700716 spin_lock_nest_lock(ptl, &mm->page_table_lock);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700717#endif
718
719 return ptl;
720}
721
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700722static void xen_pte_unlock(void *v)
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700723{
724 spinlock_t *ptl = v;
725 spin_unlock(ptl);
726}
727
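/*
 * Queue an mmuext pin/unpin operation (level selects the MMUEXT_*_TABLE
 * command) for the pagetable page at pfn.
 */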
728static void xen_do_pin(unsigned level, unsigned long pfn)
729{
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -0800730 struct mmuext_op op;
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700731
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -0800732 op.cmd = level;
733 op.arg1.mfn = pfn_to_mfn(pfn);
734
735 xen_extend_mmuext_op(&op);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700736}
737
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700738static int xen_pin_page(struct mm_struct *mm, struct page *page,
739 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700740{
Christoph Lameterd60cd462008-04-28 02:12:51 -0700741 unsigned pgfl = TestSetPagePinned(page);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700742 int flush;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700743
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700744 if (pgfl)
745 flush = 0; /* already pinned */
746 else if (PageHighMem(page))
747 /* kmaps need flushing if we found an unpinned
748 highpage */
749 flush = 1;
750 else {
751 void *pt = lowmem_page_address(page);
752 unsigned long pfn = page_to_pfn(page);
753 struct multicall_space mcs = __xen_mc_entry(0);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700754 spinlock_t *ptl;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700755
756 flush = 0;
757
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700758 /*
759 * We need to hold the pagetable lock between the time
760 * we make the pagetable RO and when we actually pin
761 * it. If we don't, then other users may come in and
762 * attempt to update the pagetable by writing it,
763 * which will fail because the memory is RO but not
764 * pinned, so Xen won't do the trap'n'emulate.
765 *
766 * If we're using split pte locks, we can't hold the
767 * entire pagetable's worth of locks during the
768 * traverse, because we may wrap the preempt count (8
769 * bits). The solution is to mark RO and pin each PTE
770 * page while holding the lock. This means the number
771 * of locks we end up holding is never more than a
772 * batch size (~32 entries, at present).
773 *
774 * If we're not using split pte locks, we needn't pin
775 * the PTE pages independently, because we're
776 * protected by the overall pagetable lock.
777 */
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700778 ptl = NULL;
779 if (level == PT_PTE)
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700780 ptl = xen_pte_lock(page, mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700781
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700782 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
783 pfn_pte(pfn, PAGE_KERNEL_RO),
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700784 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
785
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700786 if (ptl) {
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700787 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
788
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700789 /* Queue a deferred unlock for when this batch
790 is completed. */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700791 xen_mc_callback(xen_pte_unlock, ptl);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700792 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700793 }
794
795 return flush;
796}
797
798/* This is called just after a mm has been created, but it has not
799 been used yet. We need to make sure that its pagetable is all
800 read-only, and can be pinned. */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700801static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700802{
Jeremy Fitzhardinge5f94fb52010-12-17 15:31:23 -0800803 trace_xen_mmu_pgd_pin(mm, pgd);
804
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700805 xen_mc_batch();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700806
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000807 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
Jeremy Fitzhardinged05fdf32008-10-28 19:23:06 +1100808 /* re-enable interrupts for flushing */
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700809 xen_mc_issue(0);
Jeremy Fitzhardinged05fdf32008-10-28 19:23:06 +1100810
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700811 kmap_flush_unused();
Jeremy Fitzhardinged05fdf32008-10-28 19:23:06 +1100812
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700813 xen_mc_batch();
814 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700815
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700816#ifdef CONFIG_X86_64
817 {
818 pgd_t *user_pgd = xen_get_user_pgd(pgd);
819
820 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
821
822 if (user_pgd) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700823 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
Tejf63c2f22008-12-16 11:56:06 -0800824 xen_do_pin(MMUEXT_PIN_L4_TABLE,
825 PFN_DOWN(__pa(user_pgd)));
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700826 }
827 }
828#else /* CONFIG_X86_32 */
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700829#ifdef CONFIG_X86_PAE
830 /* Need to make sure unshared kernel PMD is pinnable */
Jeremy Fitzhardinge47cb2ed2008-11-06 13:48:24 -0800831 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700832 PT_PMD);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700833#endif
Jeremy Fitzhardinge28499142008-05-09 12:05:57 +0100834 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700835#endif /* CONFIG_X86_64 */
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700836 xen_mc_issue(0);
837}
838
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700839static void xen_pgd_pin(struct mm_struct *mm)
840{
841 __xen_pgd_pin(mm, mm->pgd);
842}
843
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100844/*
845 * On save, we need to pin all pagetables to make sure they get their
846 * mfns turned into pfns. Search the list for any unpinned pgds and pin
847 * them (unpinned pgds are not currently in use, probably because the
848 * process is under construction or destruction).
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700849 *
850 * Expected to be called in stop_machine() ("equivalent to taking
851 * every spinlock in the system"), so the locking doesn't really
852 * matter all that much.
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100853 */
854void xen_mm_pin_all(void)
855{
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100856 struct page *page;
857
Andrea Arcangelia79e53d2011-02-16 15:45:22 -0800858 spin_lock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100859
860 list_for_each_entry(page, &pgd_list, lru) {
861 if (!PagePinned(page)) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700862 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100863 SetPageSavePinned(page);
864 }
865 }
866
Andrea Arcangelia79e53d2011-02-16 15:45:22 -0800867 spin_unlock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100868}
869
Eduardo Habkostc1f2f092008-07-08 15:06:24 -0700870/*
 871 * The init_mm pagetable is really pinned as soon as it's created, but
872 * that's before we have page structures to store the bits. So do all
873 * the book-keeping now.
874 */
Daniel Kiper3f5089532011-05-12 17:19:53 -0400875static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700876 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700877{
878 SetPagePinned(page);
879 return 0;
880}
881
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -0700882static void __init xen_mark_init_mm_pinned(void)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700883{
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700884 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700885}
886
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700887static int xen_unpin_page(struct mm_struct *mm, struct page *page,
888 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700889{
Christoph Lameterd60cd462008-04-28 02:12:51 -0700890 unsigned pgfl = TestClearPagePinned(page);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700891
892 if (pgfl && !PageHighMem(page)) {
893 void *pt = lowmem_page_address(page);
894 unsigned long pfn = page_to_pfn(page);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700895 spinlock_t *ptl = NULL;
896 struct multicall_space mcs;
897
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700898 /*
899 * Do the converse to pin_page. If we're using split
 900		 * pte locks, we must be holding the lock while
901 * the pte page is unpinned but still RO to prevent
902 * concurrent updates from seeing it in this
903 * partially-pinned state.
904 */
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700905 if (level == PT_PTE) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700906 ptl = xen_pte_lock(page, mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700907
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700908 if (ptl)
909 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700910 }
911
912 mcs = __xen_mc_entry(0);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700913
914 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
915 pfn_pte(pfn, PAGE_KERNEL),
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700916 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
917
918 if (ptl) {
919 /* unlock when batch completed */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700920 xen_mc_callback(xen_pte_unlock, ptl);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700921 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700922 }
923
924 return 0; /* never need to flush on unpin */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700925}
926
 927/* Release a pagetable's pages back as normal RW */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700928static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700929{
Jeremy Fitzhardinge5f94fb52010-12-17 15:31:23 -0800930 trace_xen_mmu_pgd_unpin(mm, pgd);
931
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700932 xen_mc_batch();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700933
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700934 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700935
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700936#ifdef CONFIG_X86_64
937 {
938 pgd_t *user_pgd = xen_get_user_pgd(pgd);
939
940 if (user_pgd) {
Tejf63c2f22008-12-16 11:56:06 -0800941 xen_do_pin(MMUEXT_UNPIN_TABLE,
942 PFN_DOWN(__pa(user_pgd)));
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700943 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700944 }
945 }
946#endif
947
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700948#ifdef CONFIG_X86_PAE
949 /* Need to make sure unshared kernel PMD is unpinned */
Jeremy Fitzhardinge47cb2ed2008-11-06 13:48:24 -0800950 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700951 PT_PMD);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700952#endif
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700953
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000954 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700955
956 xen_mc_issue(0);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700957}
958
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700959static void xen_pgd_unpin(struct mm_struct *mm)
960{
961 __xen_pgd_unpin(mm, mm->pgd);
962}
963
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100964/*
965 * On resume, undo any pinning done at save, so that the rest of the
966 * kernel doesn't see any unexpected pinned pagetables.
967 */
968void xen_mm_unpin_all(void)
969{
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100970 struct page *page;
971
Andrea Arcangelia79e53d2011-02-16 15:45:22 -0800972 spin_lock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100973
974 list_for_each_entry(page, &pgd_list, lru) {
975 if (PageSavePinned(page)) {
976 BUG_ON(!PagePinned(page));
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700977 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100978 ClearPageSavePinned(page);
979 }
980 }
981
Andrea Arcangelia79e53d2011-02-16 15:45:22 -0800982 spin_unlock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100983}
984
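/* A pagetable is about to become active: make sure it is pinned. */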
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800985static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700986{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700987 spin_lock(&next->page_table_lock);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700988 xen_pgd_pin(next);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700989 spin_unlock(&next->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700990}
991
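/* fork/clone has copied the pagetable; pin the new mm's copy. */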
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800992static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700993{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700994 spin_lock(&mm->page_table_lock);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700995 xen_pgd_pin(mm);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700996 spin_unlock(&mm->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700997}
998
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700999
1000#ifdef CONFIG_SMP
 1001/* Another cpu may still have its %cr3 pointing at the pagetable, so
1002 we need to repoint it somewhere else before we can unpin it. */
1003static void drop_other_mm_ref(void *info)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001004{
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001005 struct mm_struct *mm = info;
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001006 struct mm_struct *active_mm;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001007
Alex Shi2113f462012-01-13 23:53:35 +08001008 active_mm = this_cpu_read(cpu_tlbstate.active_mm);
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001009
Alex Shi2113f462012-01-13 23:53:35 +08001010 if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001011 leave_mm(smp_processor_id());
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001012
1013 /* If this cpu still has a stale cr3 reference, then make sure
1014 it has been flushed. */
Alex Shi2113f462012-01-13 23:53:35 +08001015 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001016 load_cr3(swapper_pg_dir);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001017}
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001018
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001019static void xen_drop_mm_ref(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001020{
Mike Travise4d98202008-12-16 17:34:05 -08001021 cpumask_var_t mask;
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001022 unsigned cpu;
1023
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001024 if (current->active_mm == mm) {
1025 if (current->mm == mm)
1026 load_cr3(swapper_pg_dir);
1027 else
1028 leave_mm(smp_processor_id());
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001029 }
1030
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001031 /* Get the "official" set of cpus referring to our pagetable. */
Mike Travise4d98202008-12-16 17:34:05 -08001032 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1033 for_each_online_cpu(cpu) {
Rusty Russell78f1c4d2009-09-24 09:34:51 -06001034 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
Mike Travise4d98202008-12-16 17:34:05 -08001035 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1036 continue;
1037 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1038 }
1039 return;
1040 }
Rusty Russell78f1c4d2009-09-24 09:34:51 -06001041 cpumask_copy(mask, mm_cpumask(mm));
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001042
1043 /* It's possible that a vcpu may have a stale reference to our
 1044	   cr3, because it's in lazy mode, and it hasn't flushed
 1045	   its set of pending hypercalls yet.  In this case, we can
1046 look at its actual current cr3 value, and force it to flush
1047 if needed. */
1048 for_each_online_cpu(cpu) {
1049 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
Mike Travise4d98202008-12-16 17:34:05 -08001050 cpumask_set_cpu(cpu, mask);
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001051 }
1052
Mike Travise4d98202008-12-16 17:34:05 -08001053 if (!cpumask_empty(mask))
1054 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1055 free_cpumask_var(mask);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001056}
1057#else
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001058static void xen_drop_mm_ref(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001059{
1060 if (current->active_mm == mm)
1061 load_cr3(swapper_pg_dir);
1062}
1063#endif
1064
1065/*
1066 * While a process runs, Xen pins its pagetables, which means that the
1067 * hypervisor forces it to be read-only, and it controls all updates
1068 * to it. This means that all pagetable updates have to go via the
1069 * hypervisor, which is moderately expensive.
1070 *
1071 * Since we're pulling the pagetable down, we switch to use init_mm,
 1072 * unpin the old process's pagetable and mark it all read-write, which
1073 * allows further operations on it to be simple memory accesses.
1074 *
 1075 * The only subtle point is that another CPU may still be using the
 1076 * pagetable because of lazy tlb flushing.  This means we need to
1077 * switch all CPUs off this pagetable before we can unpin it.
1078 */
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -08001079static void xen_exit_mmap(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001080{
1081 get_cpu(); /* make sure we don't move around */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001082 xen_drop_mm_ref(mm);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001083 put_cpu();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001084
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -07001085 spin_lock(&mm->page_table_lock);
Jeremy Fitzhardingedf912ea2007-09-25 11:50:00 -07001086
1087 /* pgd may not be pinned in the error exit path of execve */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001088 if (xen_page_pinned(mm->pgd))
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001089 xen_pgd_unpin(mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001090
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -07001091 spin_unlock(&mm->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001092}
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -07001093
Attilio Raoc7112882012-08-21 21:22:40 +01001094static void xen_post_allocator_init(void);
1095
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001096#ifdef CONFIG_X86_64
1097static void __init xen_cleanhighmap(unsigned long vaddr,
1098 unsigned long vaddr_end)
1099{
1100 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1101 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1102
1103 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1104 * We include the PMD passed in on _both_ boundaries. */
1105 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
1106 pmd++, vaddr += PMD_SIZE) {
1107 if (pmd_none(*pmd))
1108 continue;
1109 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1110 set_pmd(pmd, __pmd(0));
1111 }
1112 /* In case we did something silly, we should crash in this function
1113 * instead of somewhere later and be confusing. */
1114 xen_mc_flush();
1115}
Juergen Gross054954e2014-11-28 11:53:58 +01001116
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001117/*
1118 * Make a page range writeable and free it.
1119 */
1120static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
1121{
1122 void *vaddr = __va(paddr);
1123 void *vaddr_end = vaddr + size;
1124
1125 for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
1126 make_lowmem_page_readwrite(vaddr);
1127
1128 memblock_free(paddr, size);
1129}
1130
1131static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl)
1132{
1133 unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
1134
1135 ClearPagePinned(virt_to_page(__va(pa)));
1136 xen_free_ro_pages(pa, PAGE_SIZE);
1137}
1138
1139/*
1140 * Since it is well isolated we can (and since it is perhaps large we should)
1141 * also free the page tables mapping the initial P->M table.
1142 */
1143static void __init xen_cleanmfnmap(unsigned long vaddr)
1144{
1145 unsigned long va = vaddr & PMD_MASK;
1146 unsigned long pa;
1147 pgd_t *pgd = pgd_offset_k(va);
1148 pud_t *pud_page = pud_offset(pgd, 0);
1149 pud_t *pud;
1150 pmd_t *pmd;
1151 pte_t *pte;
1152 unsigned int i;
1153
1154 set_pgd(pgd, __pgd(0));
1155 do {
1156 pud = pud_page + pud_index(va);
1157 if (pud_none(*pud)) {
1158 va += PUD_SIZE;
1159 } else if (pud_large(*pud)) {
1160 pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
1161 xen_free_ro_pages(pa, PUD_SIZE);
1162 va += PUD_SIZE;
1163 } else {
1164 pmd = pmd_offset(pud, va);
1165 if (pmd_large(*pmd)) {
1166 pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
1167 xen_free_ro_pages(pa, PMD_SIZE);
1168 } else if (!pmd_none(*pmd)) {
1169 pte = pte_offset_kernel(pmd, va);
1170 for (i = 0; i < PTRS_PER_PTE; ++i) {
1171 if (pte_none(pte[i]))
1172 break;
1173 pa = pte_pfn(pte[i]) << PAGE_SHIFT;
1174 xen_free_ro_pages(pa, PAGE_SIZE);
1175 }
1176 xen_cleanmfnmap_free_pgtbl(pte);
1177 }
1178 va += PMD_SIZE;
1179 if (pmd_index(va))
1180 continue;
1181 xen_cleanmfnmap_free_pgtbl(pmd);
1182 }
1183
1184 } while (pud_index(va) || pmd_index(va));
1185 xen_cleanmfnmap_free_pgtbl(pud_page);
1186}
1187
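/*
 * Release the initial MFN list supplied by the hypervisor once the p2m
 * tree built by xen_vmalloc_p2m_tree() has taken over, and clean up the
 * remaining __ka mappings of the boot-time page tables.
 */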
Juergen Gross054954e2014-11-28 11:53:58 +01001188static void __init xen_pagetable_p2m_free(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001189{
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001190 unsigned long size;
1191 unsigned long addr;
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001192
1193 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1194
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001195 /* No memory or already called. */
Juergen Gross054954e2014-11-28 11:53:58 +01001196 if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001197 return;
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001198
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001199 /* using __ka address and sticking INVALID_P2M_ENTRY! */
1200 memset((void *)xen_start_info->mfn_list, 0xff, size);
1201
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001202 addr = xen_start_info->mfn_list;
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001203 /*
1204 * We could be in __ka space.
 1205	 * We round up to the PMD, which means that if anybody at this stage is
1206 * using the __ka address of xen_start_info or
 1207	 * xen_start_info->shared_info they are going to crash. Fortunately
1208 * we have already revectored in xen_setup_kernel_pagetable and in
1209 * xen_setup_shared_info.
1210 */
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001211 size = roundup(size, PMD_SIZE);
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001212
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001213 if (addr >= __START_KERNEL_map) {
1214 xen_cleanhighmap(addr, addr + size);
1215 size = PAGE_ALIGN(xen_start_info->nr_pages *
1216 sizeof(unsigned long));
1217 memblock_free(__pa(addr), size);
1218 } else {
1219 xen_cleanmfnmap(addr);
1220 }
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001221
Konrad Rzeszutek Wilk3aca7fb2012-08-14 14:34:00 -04001222 /* At this stage, cleanup_highmap has already cleaned __ka space
1223 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1224 * the ramdisk). We continue on, erasing PMD entries that point to page
1225 * tables - do note that they are accessible at this stage via __va.
1226 * For good measure we also round up to the PMD - which means that if
 1227	 * anybody using a __ka address for the initial boot stack and trying
 1228	 * to use it is going to crash. The xen_start_info has been
1229 * taken care of already in xen_setup_kernel_pagetable. */
1230 addr = xen_start_info->pt_base;
1231 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1232
1233 xen_cleanhighmap(addr, addr + size);
1234 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1235#ifdef DEBUG
 1236	/* This is superfluous and is not necessary, but you know what,
 1237	 * let's do it. The MODULES_VADDR -> MODULES_END should be clear of
1238 * anything at this stage. */
1239 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1240#endif
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001241}
1242#endif
1243
Juergen Gross054954e2014-11-28 11:53:58 +01001244static void __init xen_pagetable_p2m_setup(void)
1245{
1246 if (xen_feature(XENFEAT_auto_translated_physmap))
1247 return;
1248
1249 xen_vmalloc_p2m_tree();
1250
1251#ifdef CONFIG_X86_64
1252 xen_pagetable_p2m_free();
1253#endif
1254 /* And revector! Bye bye old array */
1255 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1256}
1257
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001258static void __init xen_pagetable_init(void)
1259{
1260 paging_init();
Juergen Grosscdfa0ba2014-12-10 16:56:03 +01001261 xen_post_allocator_init();
Juergen Gross054954e2014-11-28 11:53:58 +01001262
1263 xen_pagetable_p2m_setup();
1264
Juergen Gross2c185682014-10-14 13:33:46 +02001265 /* Allocate and initialize top and mid mfn levels for p2m structure */
1266 xen_build_mfn_list_list();
1267
Juergen Gross1f3ac862014-11-28 11:53:53 +01001268 /* Remap memory freed due to conflicts with E820 map */
1269 if (!xen_feature(XENFEAT_auto_translated_physmap))
1270 xen_remap_memory();
1271
Juergen Gross2c185682014-10-14 13:33:46 +02001272 xen_setup_shared_info();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001273}
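/*
 * cr2 is virtualized: Xen mirrors the faulting address into the per-vcpu
 * vcpu_info structure, so reading and writing it is a plain percpu memory
 * access rather than a privileged instruction.
 */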
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001274static void xen_write_cr2(unsigned long cr2)
1275{
Alex Shi2113f462012-01-13 23:53:35 +08001276 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001277}
1278
1279static unsigned long xen_read_cr2(void)
1280{
Alex Shi2113f462012-01-13 23:53:35 +08001281 return this_cpu_read(xen_vcpu)->arch.cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001282}
1283
1284unsigned long xen_read_cr2_direct(void)
1285{
Alex Shi2113f462012-01-13 23:53:35 +08001286 return this_cpu_read(xen_vcpu_info.arch.cr2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001287}
1288
Konrad Rzeszutek Wilk95a7d762012-10-31 12:38:31 -04001289void xen_flush_tlb_all(void)
1290{
1291 struct mmuext_op *op;
1292 struct multicall_space mcs;
1293
1294 trace_xen_mmu_flush_tlb_all(0);
1295
1296 preempt_disable();
1297
1298 mcs = xen_mc_entry(sizeof(*op));
1299
1300 op = mcs.args;
1301 op->cmd = MMUEXT_TLB_FLUSH_ALL;
1302 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1303
1304 xen_mc_issue(PARAVIRT_LAZY_MMU);
1305
1306 preempt_enable();
1307}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001308static void xen_flush_tlb(void)
1309{
1310 struct mmuext_op *op;
1311 struct multicall_space mcs;
1312
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001313 trace_xen_mmu_flush_tlb(0);
1314
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001315 preempt_disable();
1316
1317 mcs = xen_mc_entry(sizeof(*op));
1318
1319 op = mcs.args;
1320 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1321 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1322
1323 xen_mc_issue(PARAVIRT_LAZY_MMU);
1324
1325 preempt_enable();
1326}
1327
1328static void xen_flush_tlb_single(unsigned long addr)
1329{
1330 struct mmuext_op *op;
1331 struct multicall_space mcs;
1332
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001333 trace_xen_mmu_flush_tlb_single(addr);
1334
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001335 preempt_disable();
1336
1337 mcs = xen_mc_entry(sizeof(*op));
1338 op = mcs.args;
1339 op->cmd = MMUEXT_INVLPG_LOCAL;
1340 op->arg1.linear_addr = addr & PAGE_MASK;
1341 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1342
1343 xen_mc_issue(PARAVIRT_LAZY_MMU);
1344
1345 preempt_enable();
1346}
1347
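/*
 * Cross-CPU flushes are handed to the hypervisor as a single
 * MMUEXT_TLB_FLUSH_MULTI (or MMUEXT_INVLPG_MULTI) operation carrying a
 * cpumask, instead of the guest sending flush IPIs itself.
 */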
1348static void xen_flush_tlb_others(const struct cpumask *cpus,
Alex Shie7b52ff2012-06-28 09:02:17 +08001349 struct mm_struct *mm, unsigned long start,
1350 unsigned long end)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001351{
1352 struct {
1353 struct mmuext_op op;
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001354#ifdef CONFIG_SMP
Andrew Jones900cba82009-12-18 10:31:31 +01001355 DECLARE_BITMAP(mask, num_processors);
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001356#else
1357 DECLARE_BITMAP(mask, NR_CPUS);
1358#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001359 } *args;
1360 struct multicall_space mcs;
1361
Alex Shie7b52ff2012-06-28 09:02:17 +08001362 trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001363
Jeremy Fitzhardingee3f8a742009-03-04 17:36:57 -08001364 if (cpumask_empty(cpus))
1365 return; /* nothing to do */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001366
1367 mcs = xen_mc_entry(sizeof(*args));
1368 args = mcs.args;
1369 args->op.arg2.vcpumask = to_cpumask(args->mask);
1370
1371 /* Remove us, and any offline CPUS. */
1372 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1373 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001374
Alex Shie7b52ff2012-06-28 09:02:17 +08001375 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
Alex Shice7184b2012-08-24 08:55:13 +00001376 if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001377 args->op.cmd = MMUEXT_INVLPG_MULTI;
Alex Shie7b52ff2012-06-28 09:02:17 +08001378 args->op.arg1.linear_addr = start;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001379 }
1380
1381 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1382
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001383 xen_mc_issue(PARAVIRT_LAZY_MMU);
1384}
1385
1386static unsigned long xen_read_cr3(void)
1387{
Alex Shi2113f462012-01-13 23:53:35 +08001388 return this_cpu_read(xen_cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001389}
1390
1391static void set_current_cr3(void *v)
1392{
Alex Shi2113f462012-01-13 23:53:35 +08001393 this_cpu_write(xen_current_cr3, (unsigned long)v);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001394}
1395
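/*
 * Queue a MMUEXT_NEW_BASEPTR (kernel) or MMUEXT_NEW_USER_BASEPTR (user)
 * operation in the current multicall batch; xen_current_cr3 is only
 * updated via the callback once the batch has actually been issued.
 */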
1396static void __xen_write_cr3(bool kernel, unsigned long cr3)
1397{
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001398 struct mmuext_op op;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001399 unsigned long mfn;
1400
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001401 trace_xen_mmu_write_cr3(kernel, cr3);
1402
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001403 if (cr3)
1404 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1405 else
1406 mfn = 0;
1407
1408 WARN_ON(mfn == 0 && kernel);
1409
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001410 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1411 op.arg1.mfn = mfn;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001412
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001413 xen_extend_mmuext_op(&op);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001414
1415 if (kernel) {
Alex Shi2113f462012-01-13 23:53:35 +08001416 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001417
1418 /* Update xen_current_cr3 once the batch has actually
1419 been submitted. */
1420 xen_mc_callback(set_current_cr3, (void *)cr3);
1421 }
1422}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001423static void xen_write_cr3(unsigned long cr3)
1424{
1425 BUG_ON(preemptible());
1426
1427 xen_mc_batch(); /* disables interrupts */
1428
 1429	/* Update while interrupts are disabled, so it's atomic with
 1430	   respect to IPIs */
Alex Shi2113f462012-01-13 23:53:35 +08001431 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001432
1433 __xen_write_cr3(true, cr3);
1434
1435#ifdef CONFIG_X86_64
1436 {
1437 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1438 if (user_pgd)
1439 __xen_write_cr3(false, __pa(user_pgd));
1440 else
1441 __xen_write_cr3(false, 0);
1442 }
1443#endif
1444
1445 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1446}
1447
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001448#ifdef CONFIG_X86_64
1449/*
1450 * At the start of the day - when Xen launches a guest, it has already
1451 * built pagetables for the guest. We diligently look over them
 1452 * in xen_setup_kernel_pagetable and graft them as appropriate into the
1453 * init_level4_pgt and its friends. Then when we are happy we load
1454 * the new init_level4_pgt - and continue on.
1455 *
1456 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1457 * up the rest of the pagetables. When it has completed it loads the cr3.
1458 * N.B. that baremetal would start at 'start_kernel' (and the early
1459 * #PF handler would create bootstrap pagetables) - so we are running
1460 * with the same assumptions as what to do when write_cr3 is executed
1461 * at this point.
1462 *
1463 * Since there are no user-page tables at all, we have two variants
1464 * of xen_write_cr3 - the early bootup (this one), and the late one
1465 * (xen_write_cr3). The reason we have to do that is that in 64-bit
1466 * the Linux kernel and user-space are both in ring 3 while the
1467 * hypervisor is in ring 0.
1468 */
1469static void __init xen_write_cr3_init(unsigned long cr3)
1470{
1471 BUG_ON(preemptible());
1472
1473 xen_mc_batch(); /* disables interrupts */
1474
 1475	/* Update while interrupts are disabled, so it's atomic with
 1476	   respect to IPIs */
1477 this_cpu_write(xen_cr3, cr3);
1478
1479 __xen_write_cr3(true, cr3);
1480
1481 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001482}
1483#endif
1484
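/*
 * On 64-bit, kernel and userspace both run in ring 3 with separate page
 * tables, so each kernel pgd gets a companion user pgd stashed in
 * page->private; it is set up here and released in xen_pgd_free().
 */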
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001485static int xen_pgd_alloc(struct mm_struct *mm)
1486{
1487 pgd_t *pgd = mm->pgd;
1488 int ret = 0;
1489
1490 BUG_ON(PagePinned(virt_to_page(pgd)));
1491
1492#ifdef CONFIG_X86_64
1493 {
1494 struct page *page = virt_to_page(pgd);
1495 pgd_t *user_pgd;
1496
1497 BUG_ON(page->private != 0);
1498
1499 ret = -ENOMEM;
1500
1501 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1502 page->private = (unsigned long)user_pgd;
1503
1504 if (user_pgd != NULL) {
Andy Lutomirski1ad83c82014-10-29 14:33:47 -07001505#ifdef CONFIG_X86_VSYSCALL_EMULATION
Andy Lutomirskif40c3302014-05-05 12:19:36 -07001506 user_pgd[pgd_index(VSYSCALL_ADDR)] =
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001507 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
Andy Lutomirski1ad83c82014-10-29 14:33:47 -07001508#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001509 ret = 0;
1510 }
1511
1512 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1513 }
1514#endif
1515
1516 return ret;
1517}
1518
1519static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1520{
1521#ifdef CONFIG_X86_64
1522 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1523
1524 if (user_pgd)
1525 free_page((unsigned long)user_pgd);
1526#endif
1527}
1528
Stefano Stabelliniee176452011-04-19 14:47:31 +01001529#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001530static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001531{
1532 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1533 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1534 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1535 pte_val_ma(pte));
Stefano Stabelliniee176452011-04-19 14:47:31 +01001536
1537 return pte;
1538}
1539#else /* CONFIG_X86_64 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001540static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Stefano Stabelliniee176452011-04-19 14:47:31 +01001541{
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001542 unsigned long pfn;
1543
1544 if (xen_feature(XENFEAT_writable_page_tables) ||
1545 xen_feature(XENFEAT_auto_translated_physmap) ||
1546 xen_start_info->mfn_list >= __START_KERNEL_map)
1547 return pte;
1548
1549 /*
1550 * Pages belonging to the initial p2m list mapped outside the default
1551 * address range must be mapped read-only. This region contains the
1552 * page tables for mapping the p2m list, too, and page tables MUST be
1553 * mapped read-only.
1554 */
1555 pfn = pte_pfn(pte);
1556 if (pfn >= xen_start_info->first_p2m_pfn &&
1557 pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1558 pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
1559
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001560 return pte;
1561}
Stefano Stabelliniee176452011-04-19 14:47:31 +01001562#endif /* CONFIG_X86_64 */
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001563
David Vrabeld095d432012-07-09 11:39:05 +01001564/*
1565 * Init-time set_pte while constructing initial pagetables, which
1566 * doesn't allow RO page table pages to be remapped RW.
1567 *
David Vrabel66a27dd2012-07-09 11:39:06 +01001568 * If there is no MFN for this PFN then this page is initially
1569 * ballooned out so clear the PTE (as in decrease_reservation() in
1570 * drivers/xen/balloon.c).
1571 *
David Vrabeld095d432012-07-09 11:39:05 +01001572 * Many of these PTE updates are done on unpinned and writable pages
1573 * and doing a hypercall for these is unnecessary and expensive. At
1574 * this point it is not possible to tell if a page is pinned or not,
1575 * so always write the PTE directly and rely on Xen trapping and
1576 * emulating any updates as necessary.
1577 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001578static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001579{
David Vrabel66a27dd2012-07-09 11:39:06 +01001580 if (pte_mfn(pte) != INVALID_P2M_ENTRY)
1581 pte = mask_rw_pte(ptep, pte);
1582 else
1583 pte = __pte_ma(0);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001584
David Vrabeld095d432012-07-09 11:39:05 +01001585 native_set_pte(ptep, pte);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001586}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001587
Juergen Grossbf9d8342015-01-28 07:44:24 +01001588static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001589{
1590 struct mmuext_op op;
1591 op.cmd = cmd;
1592 op.arg1.mfn = pfn_to_mfn(pfn);
1593 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1594 BUG();
1595}
1596
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001597/* Early in boot, while setting up the initial pagetable, assume
1598 everything is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001599static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001600{
1601#ifdef CONFIG_FLATMEM
1602 BUG_ON(mem_map); /* should only be used early */
1603#endif
1604 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001605 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1606}
1607
1608/* Used for pmd and pud */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001609static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001610{
1611#ifdef CONFIG_FLATMEM
1612 BUG_ON(mem_map); /* should only be used early */
1613#endif
1614 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001615}
1616
1617/* Early release_pte assumes that all pts are pinned, since there's
1618 only init_mm and anything attached to that is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001619static void __init xen_release_pte_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001620{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001621 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001622 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1623}
1624
Daniel Kiper3f5089532011-05-12 17:19:53 -04001625static void __init xen_release_pmd_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001626{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001627 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001628}
1629
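/*
 * Batched helpers: queue a single mmuext pin/unpin or a PTE permission
 * change into the current multicall instead of issuing an immediate
 * hypercall, so callers can flush several operations at once.
 */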
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001630static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1631{
1632 struct multicall_space mcs;
1633 struct mmuext_op *op;
1634
1635 mcs = __xen_mc_entry(sizeof(*op));
1636 op = mcs.args;
1637 op->cmd = cmd;
1638 op->arg1.mfn = pfn_to_mfn(pfn);
1639
1640 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1641}
1642
1643static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1644{
1645 struct multicall_space mcs;
1646 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1647
1648 mcs = __xen_mc_entry(0);
1649 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1650 pfn_pte(pfn, prot), 0);
1651}
1652
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001653/* This needs to make sure the new pte page is pinned iff it's being
1654 attached to a pinned pagetable. */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001655static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1656 unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001657{
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001658 bool pinned = PagePinned(virt_to_page(mm->pgd));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001659
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001660 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001661
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001662 if (pinned) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001663 struct page *page = pfn_to_page(pfn);
1664
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001665 SetPagePinned(page);
1666
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001667 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001668 xen_mc_batch();
1669
1670 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1671
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -08001672 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001673 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1674
1675 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001676 } else {
1677 /* make sure there are no stray mappings of
1678 this page */
1679 kmap_flush_unused();
1680 }
1681 }
1682}
1683
1684static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1685{
1686 xen_alloc_ptpage(mm, pfn, PT_PTE);
1687}
1688
1689static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1690{
1691 xen_alloc_ptpage(mm, pfn, PT_PMD);
1692}
1693
1694/* This should never happen until we're OK to use struct page */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001695static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001696{
1697 struct page *page = pfn_to_page(pfn);
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001698 bool pinned = PagePinned(page);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001699
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001700 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1701
1702 if (pinned) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001703 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001704 xen_mc_batch();
1705
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -08001706 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001707 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1708
1709 __set_pfn_prot(pfn, PAGE_KERNEL);
1710
1711 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001712 }
1713 ClearPagePinned(page);
1714 }
1715}
1716
1717static void xen_release_pte(unsigned long pfn)
1718{
1719 xen_release_ptpage(pfn, PT_PTE);
1720}
1721
1722static void xen_release_pmd(unsigned long pfn)
1723{
1724 xen_release_ptpage(pfn, PT_PMD);
1725}
1726
Kirill A. Shutemov98233362015-04-14 15:46:14 -07001727#if CONFIG_PGTABLE_LEVELS == 4
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001728static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1729{
1730 xen_alloc_ptpage(mm, pfn, PT_PUD);
1731}
1732
1733static void xen_release_pud(unsigned long pfn)
1734{
1735 xen_release_ptpage(pfn, PT_PUD);
1736}
1737#endif
1738
1739void __init xen_reserve_top(void)
1740{
1741#ifdef CONFIG_X86_32
1742 unsigned long top = HYPERVISOR_VIRT_START;
1743 struct xen_platform_parameters pp;
1744
1745 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1746 top = pp.virt_start;
1747
1748 reserve_top_address(-top);
1749#endif /* CONFIG_X86_32 */
1750}
1751
1752/*
1753 * Like __va(), but returns address in the kernel mapping (which is
 1754 * all we have until the physical memory mapping has been set up).
1755 */
Juergen Grossbf9d8342015-01-28 07:44:24 +01001756static void * __init __ka(phys_addr_t paddr)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001757{
1758#ifdef CONFIG_X86_64
1759 return (void *)(paddr + __START_KERNEL_map);
1760#else
1761 return __va(paddr);
1762#endif
1763}
1764
1765/* Convert a machine address to physical address */
Juergen Grossbf9d8342015-01-28 07:44:24 +01001766static unsigned long __init m2p(phys_addr_t maddr)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001767{
1768 phys_addr_t paddr;
1769
1770 maddr &= PTE_PFN_MASK;
1771 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1772
1773 return paddr;
1774}
1775
1776/* Convert a machine address to kernel virtual */
Juergen Grossbf9d8342015-01-28 07:44:24 +01001777static void * __init m2v(phys_addr_t maddr)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001778{
1779 return __ka(m2p(maddr));
1780}
1781
Juan Quintela4ec53872010-09-02 15:45:43 +01001782/* Set the page permissions on identity-mapped pages */
Juergen Grossbf9d8342015-01-28 07:44:24 +01001783static void __init set_page_prot_flags(void *addr, pgprot_t prot,
1784 unsigned long flags)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001785{
1786 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1787 pte_t pte = pfn_pte(pfn, prot);
1788
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001789 /* For PVH no need to set R/O or R/W to pin them or unpin them. */
1790 if (xen_feature(XENFEAT_auto_translated_physmap))
1791 return;
1792
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001793 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001794 BUG();
1795}
Juergen Grossbf9d8342015-01-28 07:44:24 +01001796static void __init set_page_prot(void *addr, pgprot_t prot)
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001797{
1798 return set_page_prot_flags(addr, prot, UVMF_NONE);
1799}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001800#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001801static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001802{
1803 unsigned pmdidx, pteidx;
1804 unsigned ident_pte;
1805 unsigned long pfn;
1806
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001807 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1808 PAGE_SIZE);
1809
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001810 ident_pte = 0;
1811 pfn = 0;
1812 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1813 pte_t *pte_page;
1814
1815 /* Reuse or allocate a page of ptes */
1816 if (pmd_present(pmd[pmdidx]))
1817 pte_page = m2v(pmd[pmdidx].pmd);
1818 else {
1819 /* Check for free pte pages */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001820 if (ident_pte == LEVEL1_IDENT_ENTRIES)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001821 break;
1822
1823 pte_page = &level1_ident_pgt[ident_pte];
1824 ident_pte += PTRS_PER_PTE;
1825
1826 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1827 }
1828
1829 /* Install mappings */
1830 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1831 pte_t pte;
1832
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001833 if (pfn > max_pfn_mapped)
1834 max_pfn_mapped = pfn;
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001835
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001836 if (!pte_none(pte_page[pteidx]))
1837 continue;
1838
1839 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1840 pte_page[pteidx] = pte;
1841 }
1842 }
1843
1844 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1845 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1846
1847 set_page_prot(pmd, PAGE_KERNEL_RO);
1848}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001849#endif
Ian Campbell7e775062010-09-30 12:37:26 +01001850void __init xen_setup_machphys_mapping(void)
1851{
1852 struct xen_machphys_mapping mapping;
Ian Campbell7e775062010-09-30 12:37:26 +01001853
1854 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1855 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
Jan Beulichccbcdf72011-08-16 15:07:41 +01001856 machine_to_phys_nr = mapping.max_mfn + 1;
Ian Campbell7e775062010-09-30 12:37:26 +01001857 } else {
Jan Beulichccbcdf72011-08-16 15:07:41 +01001858 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
Ian Campbell7e775062010-09-30 12:37:26 +01001859 }
Jan Beulichccbcdf72011-08-16 15:07:41 +01001860#ifdef CONFIG_X86_32
Jan Beulich61cca2f2011-09-15 08:52:40 +01001861 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1862 < machine_to_phys_mapping);
Jan Beulichccbcdf72011-08-16 15:07:41 +01001863#endif
Ian Campbell7e775062010-09-30 12:37:26 +01001864}
1865
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001866#ifdef CONFIG_X86_64
Juergen Grossbf9d8342015-01-28 07:44:24 +01001867static void __init convert_pfn_mfn(void *v)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001868{
1869 pte_t *pte = v;
1870 int i;
1871
1872 /* All levels are converted the same way, so just treat them
1873 as ptes. */
1874 for (i = 0; i < PTRS_PER_PTE; i++)
1875 pte[i] = xen_make_pte(pte[i].pte);
1876}
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001877static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1878 unsigned long addr)
1879{
1880 if (*pt_base == PFN_DOWN(__pa(addr))) {
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001881 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001882 clear_page((void *)addr);
1883 (*pt_base)++;
1884 }
1885 if (*pt_end == PFN_DOWN(__pa(addr))) {
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001886 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001887 clear_page((void *)addr);
1888 (*pt_end)--;
1889 }
1890}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001891/*
Lucas De Marchi0d2eb442011-03-17 16:24:16 -03001892 * Set up the initial kernel pagetable.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001893 *
1894 * We can construct this by grafting the Xen provided pagetable into
1895 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
Stefan Bader0b5a5062014-09-02 11:16:01 +01001896 * level2_ident_pgt, and level2_kernel_pgt. This means that only the
1897 * kernel has a physical mapping to start with - but that's enough to
1898 * get __va working. We need to fill in the rest of the physical
1899 * mapping once some sort of allocator has been set up. NOTE: for
1900 * PVH, the page tables are native.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001901 */
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04001902void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001903{
1904 pud_t *l3;
1905 pmd_t *l2;
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001906 unsigned long addr[3];
1907 unsigned long pt_base, pt_end;
1908 unsigned i;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001909
Stefano Stabellini14988a42011-02-18 11:32:40 +00001910 /* max_pfn_mapped is the last pfn mapped in the initial memory
1911 * mappings. Considering that on Xen after the kernel mappings we
1912 * have the mappings of some pages that don't exist in pfn space, we
1913 * set max_pfn_mapped to the last real pfn mapped. */
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001914 if (xen_start_info->mfn_list < __START_KERNEL_map)
1915 max_pfn_mapped = xen_start_info->first_p2m_pfn;
1916 else
1917 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
Stefano Stabellini14988a42011-02-18 11:32:40 +00001918
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001919 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1920 pt_end = pt_base + xen_start_info->nr_pt_frames;
1921
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001922 /* Zap identity mapping */
1923 init_level4_pgt[0] = __pgd(0);
1924
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001925 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1926 /* Pre-constructed entries are in pfn, so convert to mfn */
1927 /* L4[272] -> level3_ident_pgt
1928 * L4[511] -> level3_kernel_pgt */
1929 convert_pfn_mfn(init_level4_pgt);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001930
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001931 /* L3_i[0] -> level2_ident_pgt */
1932 convert_pfn_mfn(level3_ident_pgt);
1933 /* L3_k[510] -> level2_kernel_pgt
Stefan Bader0b5a5062014-09-02 11:16:01 +01001934 * L3_k[511] -> level2_fixmap_pgt */
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001935 convert_pfn_mfn(level3_kernel_pgt);
Stefan Bader0b5a5062014-09-02 11:16:01 +01001936
1937 /* L3_k[511][506] -> level1_fixmap_pgt */
1938 convert_pfn_mfn(level2_fixmap_pgt);
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001939 }
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001940 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001941 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1942 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1943
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001944 addr[0] = (unsigned long)pgd;
1945 addr[1] = (unsigned long)l3;
1946 addr[2] = (unsigned long)l2;
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001947	/* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
Stefan Bader0b5a5062014-09-02 11:16:01 +01001948 * Both L4[272][0] and L4[511][510] have entries that point to the same
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001949 * L2 (PMD) tables. Meaning that if you modify it in __va space
 1950	 * it will also be modified in the __ka space! (But if you just
1951 * modify the PMD table to point to other PTE's or none, then you
1952 * are OK - which is what cleanup_highmap does) */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001953 copy_page(level2_ident_pgt, l2);
Stefan Bader0b5a5062014-09-02 11:16:01 +01001954 /* Graft it onto L4[511][510] */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001955 copy_page(level2_kernel_pgt, l2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001956
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001957 /* Copy the initial P->M table mappings if necessary. */
1958 i = pgd_index(xen_start_info->mfn_list);
1959 if (i && i < pgd_index(__START_KERNEL_map))
1960 init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
1961
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001962 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1963 /* Make pagetable pieces RO */
1964 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1965 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1966 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1967 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1968 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1969 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1970 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
Stefan Bader0b5a5062014-09-02 11:16:01 +01001971 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001972
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001973 /* Pin down new L4 */
1974 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1975 PFN_DOWN(__pa_symbol(init_level4_pgt)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001976
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001977 /* Unpin Xen-provided one */
1978 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001979
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001980 /*
1981 * At this stage there can be no user pgd, and no page
1982 * structure to attach it to, so make sure we just set kernel
1983 * pgd.
1984 */
1985 xen_mc_batch();
1986 __xen_write_cr3(true, __pa(init_level4_pgt));
1987 xen_mc_issue(PARAVIRT_LAZY_CPU);
1988 } else
1989 native_write_cr3(__pa(init_level4_pgt));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001990
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001991	/* We can't easily rip out L3 and L2, as the Xen pagetables are
1992 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
1993 * the initial domain. For guests using the toolstack, they are in:
 1994	 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
1995 * rip out the [L4] (pgd), but for guests we shave off three pages.
1996 */
1997 for (i = 0; i < ARRAY_SIZE(addr); i++)
1998 check_pt_base(&pt_base, &pt_end, addr[i]);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001999
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04002000	/* Reserve our Xen pagetable, which is now three pages smaller */
2001 memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
Juergen Gross8f5b0c62015-07-17 06:51:25 +02002002 /* protect xen_start_info */
2003 memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04002004 /* Revector the xen_start_info */
2005 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002006}
2007#else /* !CONFIG_X86_64 */
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002008static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
2009static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
2010
Daniel Kiper3f5089532011-05-12 17:19:53 -04002011static void __init xen_write_cr3_init(unsigned long cr3)
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002012{
2013 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
2014
2015 BUG_ON(read_cr3() != __pa(initial_page_table));
2016 BUG_ON(cr3 != __pa(swapper_pg_dir));
2017
2018 /*
2019 * We are switching to swapper_pg_dir for the first time (from
2020 * initial_page_table) and therefore need to mark that page
2021 * read-only and then pin it.
2022 *
2023 * Xen disallows sharing of kernel PMDs for PAE
2024 * guests. Therefore we must copy the kernel PMD from
2025 * initial_page_table into a new kernel PMD to be used in
2026 * swapper_pg_dir.
2027 */
2028 swapper_kernel_pmd =
2029 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04002030 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002031 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
2032 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
2033 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
2034
2035 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2036 xen_write_cr3(cr3);
2037 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
2038
2039 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
2040 PFN_DOWN(__pa(initial_page_table)));
2041 set_page_prot(initial_page_table, PAGE_KERNEL);
2042 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
2043
2044 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2045}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002046
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04002047void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002048{
2049 pmd_t *kernel_pmd;
2050
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002051 initial_kernel_pmd =
2052 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Jeremy Fitzhardingef0991802010-08-26 16:16:28 -07002053
Stefano Stabellinia91d9282011-06-03 09:51:34 +00002054 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
2055 xen_start_info->nr_pt_frames * PAGE_SIZE +
2056 512*1024);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002057
2058 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04002059 copy_page(initial_kernel_pmd, kernel_pmd);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002060
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002061 xen_map_identity_early(initial_kernel_pmd, max_pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002062
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04002063 copy_page(initial_page_table, pgd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002064 initial_page_table[KERNEL_PGD_BOUNDARY] =
2065 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002066
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002067 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2068 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002069 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2070
2071 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2072
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002073 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2074 PFN_DOWN(__pa(initial_page_table)));
2075 xen_write_cr3(__pa(initial_page_table));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002076
Tejun Heo24aa0782011-07-12 11:16:06 +02002077 memblock_reserve(__pa(xen_start_info->pt_base),
Konrad Rzeszutek Wilkdc6821e2012-01-07 21:27:38 -05002078 xen_start_info->nr_pt_frames * PAGE_SIZE);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002079}
2080#endif /* CONFIG_X86_64 */
2081
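/*
 * Backing page for fixmap slots (local APIC, IO APIC) that the guest must
 * never touch directly; it is filled with 0xff in xen_init_mmu_ops().
 */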
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002082static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2083
Masami Hiramatsu3b3809a2009-04-09 10:55:33 -07002084static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002085{
2086 pte_t pte;
2087
2088 phys >>= PAGE_SHIFT;
2089
2090 switch (idx) {
2091 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
Kees Cook4eefbe72013-04-10 12:24:22 -07002092 case FIX_RO_IDT:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002093#ifdef CONFIG_X86_32
2094 case FIX_WP_TEST:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002095# ifdef CONFIG_HIGHMEM
2096 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2097# endif
Andy Lutomirski1ad83c82014-10-29 14:33:47 -07002098#elif defined(CONFIG_X86_VSYSCALL_EMULATION)
Andy Lutomirskif40c3302014-05-05 12:19:36 -07002099 case VSYSCALL_PAGE:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002100#endif
Jeremy Fitzhardinge3ecb1b72009-03-07 23:48:41 -08002101 case FIX_TEXT_POKE0:
2102 case FIX_TEXT_POKE1:
2103 /* All local page mappings */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002104 pte = pfn_pte(phys, prot);
2105 break;
2106
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002107#ifdef CONFIG_X86_LOCAL_APIC
2108 case FIX_APIC_BASE: /* maps dummy local APIC */
2109 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2110 break;
2111#endif
2112
2113#ifdef CONFIG_X86_IO_APIC
2114 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2115 /*
2116 * We just don't map the IO APIC - all access is via
2117 * hypercalls. Keep the address in the pte for reference.
2118 */
Konrad Rzeszutek Wilk27abd142012-04-16 13:53:40 -04002119 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002120 break;
2121#endif
2122
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002123 case FIX_PARAVIRT_BOOTMAP:
2124 /* This is an MFN, but it isn't an IO mapping from the
2125 IO domain */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002126 pte = mfn_pte(phys, prot);
2127 break;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002128
2129 default:
2130 /* By default, set_fixmap is used for hardware mappings */
David Vrabel7f2f8822014-01-08 14:01:01 +00002131 pte = mfn_pte(phys, prot);
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002132 break;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002133 }
2134
2135 __native_set_fixmap(idx, pte);
2136
Andy Lutomirski1ad83c82014-10-29 14:33:47 -07002137#ifdef CONFIG_X86_VSYSCALL_EMULATION
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002138 /* Replicate changes to map the vsyscall page into the user
2139 pagetable vsyscall mapping. */
Andy Lutomirskif40c3302014-05-05 12:19:36 -07002140 if (idx == VSYSCALL_PAGE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002141 unsigned long vaddr = __fix_to_virt(idx);
2142 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2143 }
2144#endif
2145}
2146
Daniel Kiper3f5089532011-05-12 17:19:53 -04002147static void __init xen_post_allocator_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002148{
Mukesh Rathor4e44e442013-12-31 12:41:27 -05002149 if (xen_feature(XENFEAT_auto_translated_physmap))
2150 return;
2151
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002152 pv_mmu_ops.set_pte = xen_set_pte;
2153 pv_mmu_ops.set_pmd = xen_set_pmd;
2154 pv_mmu_ops.set_pud = xen_set_pud;
Kirill A. Shutemov98233362015-04-14 15:46:14 -07002155#if CONFIG_PGTABLE_LEVELS == 4
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002156 pv_mmu_ops.set_pgd = xen_set_pgd;
2157#endif
2158
2159 /* This will work as long as patching hasn't happened yet
2160 (which it hasn't) */
2161 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2162 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2163 pv_mmu_ops.release_pte = xen_release_pte;
2164 pv_mmu_ops.release_pmd = xen_release_pmd;
Kirill A. Shutemov98233362015-04-14 15:46:14 -07002165#if CONFIG_PGTABLE_LEVELS == 4
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002166 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2167 pv_mmu_ops.release_pud = xen_release_pud;
2168#endif
2169
2170#ifdef CONFIG_X86_64
Konrad Rzeszutek Wilkd3eb2c82013-03-22 10:34:28 -04002171 pv_mmu_ops.write_cr3 = &xen_write_cr3;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002172 SetPagePinned(virt_to_page(level3_user_vsyscall));
2173#endif
2174 xen_mark_init_mm_pinned();
2175}
2176
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002177static void xen_leave_lazy_mmu(void)
2178{
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002179 preempt_disable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002180 xen_mc_flush();
2181 paravirt_leave_lazy_mmu();
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002182 preempt_enable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002183}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002184
Daniel Kiper3f5089532011-05-12 17:19:53 -04002185static const struct pv_mmu_ops xen_mmu_ops __initconst = {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002186 .read_cr2 = xen_read_cr2,
2187 .write_cr2 = xen_write_cr2,
2188
2189 .read_cr3 = xen_read_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002190 .write_cr3 = xen_write_cr3_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002191
2192 .flush_tlb_user = xen_flush_tlb,
2193 .flush_tlb_kernel = xen_flush_tlb,
2194 .flush_tlb_single = xen_flush_tlb_single,
2195 .flush_tlb_others = xen_flush_tlb_others,
2196
2197 .pte_update = paravirt_nop,
2198 .pte_update_defer = paravirt_nop,
2199
2200 .pgd_alloc = xen_pgd_alloc,
2201 .pgd_free = xen_pgd_free,
2202
2203 .alloc_pte = xen_alloc_pte_init,
2204 .release_pte = xen_release_pte_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002205 .alloc_pmd = xen_alloc_pmd_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002206 .release_pmd = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002207
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002208 .set_pte = xen_set_pte_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002209 .set_pte_at = xen_set_pte_at,
2210 .set_pmd = xen_set_pmd_hyper,
2211
2212 .ptep_modify_prot_start = __ptep_modify_prot_start,
2213 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2214
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002215 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2216 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002217
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002218 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2219 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002220
2221#ifdef CONFIG_X86_PAE
2222 .set_pte_atomic = xen_set_pte_atomic,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002223 .pte_clear = xen_pte_clear,
2224 .pmd_clear = xen_pmd_clear,
2225#endif /* CONFIG_X86_PAE */
2226 .set_pud = xen_set_pud_hyper,
2227
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002228 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2229 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002230
Kirill A. Shutemov98233362015-04-14 15:46:14 -07002231#if CONFIG_PGTABLE_LEVELS == 4
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002232 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2233 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002234 .set_pgd = xen_set_pgd_hyper,
2235
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002236 .alloc_pud = xen_alloc_pmd_init,
2237 .release_pud = xen_release_pmd_init,
Kirill A. Shutemov98233362015-04-14 15:46:14 -07002238#endif /* CONFIG_PGTABLE_LEVELS == 4 */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002239
2240 .activate_mm = xen_activate_mm,
2241 .dup_mmap = xen_dup_mmap,
2242 .exit_mmap = xen_exit_mmap,
2243
2244 .lazy_mode = {
2245 .enter = paravirt_enter_lazy_mmu,
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002246 .leave = xen_leave_lazy_mmu,
Boris Ostrovsky511ba862013-03-23 09:36:36 -04002247 .flush = paravirt_flush_lazy_mmu,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002248 },
2249
2250 .set_fixmap = xen_set_fixmap,
2251};
2252
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002253void __init xen_init_mmu_ops(void)
2254{
Attilio Rao7737b212012-08-21 21:22:38 +01002255 x86_init.paging.pagetable_init = xen_pagetable_init;
Mukesh Rathor76bccef2014-01-03 09:48:08 -05002256
2257 /* Optimization - we can use the HVM one but it has no idea which
2258 * VCPUs are descheduled - which means that it will needlessly IPI
 2259	 * them. Xen knows, so let it do the job.
2260 */
2261 if (xen_feature(XENFEAT_auto_translated_physmap)) {
2262 pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
2263 return;
2264 }
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002265 pv_mmu_ops = xen_mmu_ops;
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -07002266
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002267 memset(dummy_mapping, 0xff, PAGE_SIZE);
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002268}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002269
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002270/* Protected by xen_reservation_lock. */
2271#define MAX_CONTIG_ORDER 9 /* 2MB */
2272static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2273
2274#define VOID_PTE (mfn_pte(0, __pgprot(0)))
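/*
 * Clear the PTEs covering a 2^order page range, recording the MFNs and/or
 * PFNs that were mapped there so the frames can be handed to
 * XENMEM_exchange later.
 */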
2275static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2276 unsigned long *in_frames,
2277 unsigned long *out_frames)
2278{
2279 int i;
2280 struct multicall_space mcs;
2281
2282 xen_mc_batch();
2283 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2284 mcs = __xen_mc_entry(0);
2285
2286 if (in_frames)
2287 in_frames[i] = virt_to_mfn(vaddr);
2288
2289 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
Konrad Rzeszutek Wilk6eaa4122011-01-18 20:09:41 -05002290 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002291
2292 if (out_frames)
2293 out_frames[i] = virt_to_pfn(vaddr);
2294 }
2295 xen_mc_issue(0);
2296}
2297
2298/*
2299 * Update the pfn-to-mfn mappings for a virtual address range, either to
2300 * point to an array of mfns, or contiguously from a single starting
2301 * mfn.
2302 */
2303static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2304 unsigned long *mfns,
2305 unsigned long first_mfn)
2306{
2307 unsigned i, limit;
2308 unsigned long mfn;
2309
2310 xen_mc_batch();
2311
2312 limit = 1u << order;
2313 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2314 struct multicall_space mcs;
2315 unsigned flags;
2316
2317 mcs = __xen_mc_entry(0);
2318 if (mfns)
2319 mfn = mfns[i];
2320 else
2321 mfn = first_mfn + i;
2322
2323 if (i < (limit - 1))
2324 flags = 0;
2325 else {
2326 if (order == 0)
2327 flags = UVMF_INVLPG | UVMF_ALL;
2328 else
2329 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2330 }
2331
2332 MULTI_update_va_mapping(mcs.mc, vaddr,
2333 mfn_pte(mfn, PAGE_KERNEL), flags);
2334
2335 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2336 }
2337
2338 xen_mc_issue(0);
2339}
2340
2341/*
2342 * Perform the hypercall to exchange a region of our pfns to point to
2343 * memory with the required contiguous alignment. Takes the pfns as
2344 * input, and populates mfns as output.
2345 *
2346 * Returns a success code indicating whether the hypervisor was able to
2347 * satisfy the request or not.
2348 */
2349static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2350 unsigned long *pfns_in,
2351 unsigned long extents_out,
2352 unsigned int order_out,
2353 unsigned long *mfns_out,
2354 unsigned int address_bits)
2355{
2356 long rc;
2357 int success;
2358
2359 struct xen_memory_exchange exchange = {
2360 .in = {
2361 .nr_extents = extents_in,
2362 .extent_order = order_in,
2363 .extent_start = pfns_in,
2364 .domid = DOMID_SELF
2365 },
2366 .out = {
2367 .nr_extents = extents_out,
2368 .extent_order = order_out,
2369 .extent_start = mfns_out,
2370 .address_bits = address_bits,
2371 .domid = DOMID_SELF
2372 }
2373 };
2374
2375 BUG_ON(extents_in << order_in != extents_out << order_out);
2376
2377 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2378 success = (exchange.nr_exchanged == extents_in);
2379
2380 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2381 BUG_ON(success && (rc != 0));
2382
2383 return success;
2384}
2385
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002386int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
Stefano Stabellini69908902013-10-09 16:56:32 +00002387 unsigned int address_bits,
2388 dma_addr_t *dma_handle)
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002389{
2390 unsigned long *in_frames = discontig_frames, out_frame;
2391 unsigned long flags;
2392 int success;
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002393 unsigned long vstart = (unsigned long)phys_to_virt(pstart);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002394
2395 /*
2396 * Currently an auto-translated guest will not perform I/O, nor will
2397 * it require PAE page directories below 4GB. Therefore any calls to
2398 * this function are redundant and can be ignored.
2399 */
2400
2401 if (xen_feature(XENFEAT_auto_translated_physmap))
2402 return 0;
2403
2404 if (unlikely(order > MAX_CONTIG_ORDER))
2405 return -ENOMEM;
2406
2407 memset((void *) vstart, 0, PAGE_SIZE << order);
2408
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002409 spin_lock_irqsave(&xen_reservation_lock, flags);
2410
2411 /* 1. Zap current PTEs, remembering MFNs. */
2412 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2413
2414 /* 2. Get a new contiguous memory extent. */
2415 out_frame = virt_to_pfn(vstart);
2416 success = xen_exchange_memory(1UL << order, 0, in_frames,
2417 1, order, &out_frame,
2418 address_bits);
2419
2420 /* 3. Map the new extent in place of old pages. */
2421 if (success)
2422 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2423 else
2424 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2425
2426 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2427
Stefano Stabellini69908902013-10-09 16:56:32 +00002428 *dma_handle = virt_to_machine(vstart).maddr;
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002429 return success ? 0 : -ENOMEM;
2430}
2431EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
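/*
 * Usage sketch (illustrative only): a caller needing a machine-contiguous
 * buffer below some DMA limit would do roughly
 *
 *	rc = xen_create_contiguous_region(__pa(buf), order,
 *					  address_bits, &dma_handle);
 *	if (rc)
 *		...fall back or fail...
 *
 * swiotlb-xen relies on this to make its bounce buffers contiguous in
 * machine memory.
 */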
2432
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002433void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002434{
2435 unsigned long *out_frames = discontig_frames, in_frame;
2436 unsigned long flags;
2437 int success;
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002438 unsigned long vstart;
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002439
2440 if (xen_feature(XENFEAT_auto_translated_physmap))
2441 return;
2442
2443 if (unlikely(order > MAX_CONTIG_ORDER))
2444 return;
2445
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002446 vstart = (unsigned long)phys_to_virt(pstart);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002447 memset((void *) vstart, 0, PAGE_SIZE << order);
2448
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002449 spin_lock_irqsave(&xen_reservation_lock, flags);
2450
2451 /* 1. Find start MFN of contiguous extent. */
2452 in_frame = virt_to_mfn(vstart);
2453
2454 /* 2. Zap current PTEs. */
2455 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2456
2457 /* 3. Do the exchange for non-contiguous MFNs. */
2458 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2459 0, out_frames, 0);
2460
2461 /* 4. Map new pages in place of old pages. */
2462 if (success)
2463 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2464 else
2465 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2466
2467 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2468}
2469EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
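
/*
 * Minimal usage sketch (illustrative only, not part of this file): a
 * driver that needs a buffer which is machine-contiguous and below a
 * DMA address limit can allocate ordinary pages and then ask Xen to
 * exchange the backing frames. The helper names below are hypothetical;
 * the in-tree user of this interface is the Xen swiotlb code. The
 * caller must also remember to undo the exchange before freeing.
 */
static void *example_alloc_xen_dma_buffer(unsigned int order,
					  dma_addr_t *dma_handle)
{
	void *buf = (void *)__get_free_pages(GFP_KERNEL, order);

	if (!buf)
		return NULL;

	/* Ask for a contiguous machine extent addressable with 32 bits. */
	if (xen_create_contiguous_region(virt_to_phys(buf), order,
					 32, dma_handle)) {
		free_pages((unsigned long)buf, order);
		return NULL;
	}
	return buf;
}

static void example_free_xen_dma_buffer(void *buf, unsigned int order)
{
	/* Hand the contiguous extent back before freeing the pages. */
	xen_destroy_contiguous_region(virt_to_phys(buf), order);
	free_pages((unsigned long)buf, order);
}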

#ifdef CONFIG_XEN_PVHVM
#ifdef CONFIG_PROC_VMCORE
/*
 * This function is used in two contexts:
 * - the kdump kernel has to check whether a pfn of the crashed kernel
 *   was a ballooned page. vmcore uses this function to decide whether
 *   to access a pfn of the crashed kernel.
 * - the kexec kernel has to check whether a pfn was ballooned by the
 *   previous kernel. If the pfn is ballooned, handle it properly.
 * Returns 0 if the pfn is not backed by a RAM page; the caller may
 * then handle the pfn specially.
 */
static int xen_oldmem_pfn_is_ram(unsigned long pfn)
{
	struct xen_hvm_get_mem_type a = {
		.domid = DOMID_SELF,
		.pfn = pfn,
	};
	int ram;

	if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
		return -ENXIO;

	switch (a.mem_type) {
	case HVMMEM_mmio_dm:
		ram = 0;
		break;
	case HVMMEM_ram_rw:
	case HVMMEM_ram_ro:
	default:
		ram = 1;
		break;
	}

	return ram;
}
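
/*
 * Sketch of how a kdump reader might consume the hook above (the helper
 * name here is hypothetical; the real consumer is fs/proc/vmcore.c,
 * which picks up the hook via register_oldmem_pfn_is_ram()): roughly,
 * only a return value of 0 means "not backed by RAM", so such pages are
 * zero-filled instead of being read from the crashed kernel, while
 * errors (< 0) and RAM (> 0) both fall through to a normal read.
 */
static ssize_t example_read_oldmem_page(unsigned long pfn, char *buf)
{
	if (xen_oldmem_pfn_is_ram(pfn) == 0) {
		/* Ballooned-out or MMIO page: nothing sensible to copy. */
		memset(buf, 0, PAGE_SIZE);
		return PAGE_SIZE;
	}

	return copy_oldmem_page(pfn, buf, PAGE_SIZE, 0, 0);
}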
#endif

static void xen_hvm_exit_mmap(struct mm_struct *mm)
{
	struct xen_hvm_pagetable_dying a;
	int rc;

	a.domid = DOMID_SELF;
	a.gpa = __pa(mm->pgd);
	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
	WARN_ON_ONCE(rc < 0);
}

static int is_pagetable_dying_supported(void)
{
	struct xen_hvm_pagetable_dying a;
	int rc = 0;

	a.domid = DOMID_SELF;
	a.gpa = 0x00;
	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
	if (rc < 0) {
		printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
		return 0;
	}
	return 1;
}

void __init xen_hvm_init_mmu_ops(void)
{
	if (is_pagetable_dying_supported())
		pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
#ifdef CONFIG_PROC_VMCORE
	register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
#endif
}
#endif

#define REMAP_BATCH_SIZE 16

struct remap_data {
	xen_pfn_t *mfn;
	bool contiguous;
	pgprot_t prot;
	struct mmu_update *mmu_update;
};

static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));

	/* If we have a contiguous range, just update the mfn itself,
	   else advance the pointer to the next mfn. */
	if (rmd->contiguous)
		(*rmd->mfn)++;
	else
		rmd->mfn++;

	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
	rmd->mmu_update->val = pte_val_ma(pte);
	rmd->mmu_update++;

	return 0;
}

static int do_remap_mfn(struct vm_area_struct *vma,
			unsigned long addr,
			xen_pfn_t *mfn, int nr,
			int *err_ptr, pgprot_t prot,
			unsigned domid,
			struct page **pages)
{
	int err = 0;
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	unsigned long range;
	int mapped = 0;

	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
#ifdef CONFIG_XEN_PVH
		/* We need to update the local page tables and the xen HAP */
		return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
						 prot, domid, pages);
#else
		return -EINVAL;
#endif
	}

	rmd.mfn = mfn;
	rmd.prot = prot;
	/* We use err_ptr to indicate whether we are doing a contiguous
	 * mapping or a discontiguous mapping. */
	rmd.contiguous = !err_ptr;

	while (nr) {
		int index = 0;
		int done = 0;
		int batch = min(REMAP_BATCH_SIZE, nr);
		int batch_left = batch;
		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_mfn_pte_fn, &rmd);
		if (err)
			goto out;

		/* We record an error for each page that fails, but
		 * continue mapping until the whole set is done. */
		do {
			int i;

			err = HYPERVISOR_mmu_update(&mmu_update[index],
						    batch_left, &done, domid);

			/*
			 * @err_ptr may be the same buffer as @mfn, so
			 * only clear it after each chunk of @mfn is
			 * used.
			 */
			if (err_ptr) {
				for (i = index; i < index + done; i++)
					err_ptr[i] = 0;
			}
			if (err < 0) {
				if (!err_ptr)
					goto out;
				err_ptr[i] = err;
				done++; /* Skip failed frame. */
			} else
				mapped += done;
			batch_left -= done;
			index += done;
		} while (batch_left);

		nr -= batch;
		addr += range;
		if (err_ptr)
			err_ptr += batch;
	}
out:

	xen_flush_tlb_all();

	return err < 0 ? err : mapped;
}

int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t mfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	return do_remap_mfn(vma, addr, &mfn, nr, NULL, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);

int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *mfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned domid, struct page **pages)
{
	/* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
	 * and it is quite hard to work back from "wrong memory was mapped in"
	 * to the actual cause later on.
	 */
	BUG_ON(err_ptr == NULL);
	return do_remap_mfn(vma, addr, mfn, nr, err_ptr, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
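
/*
 * Usage sketch (illustrative only, not part of this file): a
 * privcmd-style caller mapping a batch of foreign frames into a VMA and
 * collecting per-frame failures. The helper name and the errs[] array
 * are hypothetical; the VMA is assumed to already carry VM_IO and
 * VM_PFNMAP, and pages is passed as NULL on the assumption of a PV
 * domain, where the pages array is not consumed.
 */
static int example_map_foreign_frames(struct vm_area_struct *vma,
				      unsigned long addr,
				      xen_pfn_t *mfns, int nr,
				      int *errs, unsigned domid)
{
	int mapped, i;

	mapped = xen_remap_domain_mfn_array(vma, addr, mfns, nr, errs,
					    vma->vm_page_prot, domid, NULL);
	if (mapped < 0)
		return mapped;

	/* Frames that failed keep their individual error code in errs[]. */
	for (i = 0; i < nr; i++)
		if (errs[i])
			pr_debug("frame %d not mapped: %d\n", i, errs[i]);

	return 0;
}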

/* Returns: 0 on success */
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
			       int numpgs, struct page **pages)
{
	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

#ifdef CONFIG_XEN_PVH
	return xen_xlate_unmap_gfn_range(vma, numpgs, pages);
#else
	return -EINVAL;
#endif
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);