/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfns and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion. In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable. When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest. This prevents uncontrolled
 * guest updates to the pagetable. Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow. The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use. This means that Xen can be assured
 * that it is still valid when it is loaded into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
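
/*
 * Illustrative sketch (not part of the original file): the pfn<->mfn
 * translation described above, modelled in user space with two flat
 * arrays standing in for the p2m and m2p tables. The array names and
 * sizes are made up for the example; the real lookups are
 * pfn_to_mfn()/mfn_to_pfn() from the Xen page headers.
 */
#if 0	/* example only, not compiled */
#include <stdio.h>

#define NPAGES 8

static unsigned long p2m[NPAGES];	/* guest pfn -> machine mfn */
static unsigned long m2p[NPAGES];	/* machine mfn -> guest pfn */

int main(void)
{
	unsigned long pfn, mfn;

	/* pretend the hypervisor handed us machine pages in reverse order */
	for (pfn = 0; pfn < NPAGES; pfn++) {
		p2m[pfn] = NPAGES - 1 - pfn;
		m2p[p2m[pfn]] = pfn;
	}

	pfn = 3;
	mfn = p2m[pfn];		/* what would go into a pte */
	printf("pfn %lu -> mfn %lu -> pfn %lu\n", pfn, mfn, m2p[mfn]);
	return 0;
}
#endif
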
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/pat.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

#define MMU_UPDATE_HISTO	30

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and driver_pages, and
 * balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

#ifdef CONFIG_XEN_DEBUG_FS

static struct {
	u32 pgd_update;
	u32 pgd_update_pinned;
	u32 pgd_update_batched;

	u32 pud_update;
	u32 pud_update_pinned;
	u32 pud_update_batched;

	u32 pmd_update;
	u32 pmd_update_pinned;
	u32 pmd_update_batched;

	u32 pte_update;
	u32 pte_update_pinned;
	u32 pte_update_batched;

	u32 mmu_update;
	u32 mmu_update_extended;
	u32 mmu_update_histo[MMU_UPDATE_HISTO];

	u32 prot_commit;
	u32 prot_commit_batched;

	u32 set_pte_at;
	u32 set_pte_at_batched;
	u32 set_pte_at_pinned;
	u32 set_pte_at_current;
	u32 set_pte_at_kernel;
} mmu_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mmu_stats, 0, sizeof(mmu_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); mmu_stats.elem += (val); } while(0)

#else  /* !CONFIG_XEN_DEBUG_FS */

#define ADD_STATS(elem, val)	do { (void)(val); } while(0)

#endif /* CONFIG_XEN_DEBUG_FS */
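
/*
 * Illustrative sketch (not from the original file): how the counters
 * above are used and reset. The zero_stats flag is exposed through
 * debugfs elsewhere in this file (not shown in this excerpt); arming
 * it makes the next ADD_STATS() wipe the whole struct via check_zero().
 */
#if 0	/* example only, not compiled */
static void example_stats_usage(void)
{
	ADD_STATS(pmd_update, 1);	/* bump a counter */

	zero_stats = 1;			/* arm a reset, e.g. from debugfs */
	ADD_STATS(pmd_update, 1);	/* check_zero() wipes, then adds 1 */
}
#endif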


/*
 * Identity map, in addition to plain kernel map. This needs to be
 * large enough to allocate the page table pages needed to map the
 * rest. Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3. This may not be the current effective cr3, because
 * its update may be being lazily deferred. However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early). If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */


/*
 * Just beyond the highest usermode address. STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
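
/*
 * Illustrative arithmetic (not from the original file): the round-up
 * above is the usual (x + align - 1) & ~(align - 1) idiom, with
 * PGDIR_MASK playing the ~(align - 1) role. The sample values below
 * assume x86-64, where PGDIR_SIZE is 512GB and STACK_TOP_MAX is
 * 0x7ffffffff000.
 */
#if 0	/* example only, not compiled */
#include <stdio.h>

int main(void)
{
	unsigned long pgdir_size = 1UL << 39;		/* 512GB */
	unsigned long pgdir_mask = ~(pgdir_size - 1);
	unsigned long stack_top_max = 0x7ffffffff000UL;

	/* prints 0x800000000000, the next PGD boundary */
	printf("%#lx\n", (stack_top_max + pgdir_size - 1) & pgdir_mask);
	return 0;
}
#endif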

/*
 * Xen leaves the responsibility for maintaining p2m mappings to the
 * guests themselves, but it must also access and update the p2m array
 * during suspend/resume when all the pages are reallocated.
 *
 * The p2m table is logically a flat array, but we implement it as a
 * three-level tree to allow the address space to be sparse.
 *
 *                      Xen
 *                       |
 *       p2m_top               p2m_top_mfn
 *         /   \                  /    \
 * p2m_mid p2m_mid        p2m_mid_mfn p2m_mid_mfn
 *    / \    / \              /           /
 *  p2m p2m p2m p2m    p2m p2m p2m ...
 *
 * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
 * maximum representable pseudo-physical address space is:
 *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
 *
 * P2M_PER_PAGE depends on the architecture, as an mfn is always an
 * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to
 * 512 and 1024 entries respectively.
 */

unsigned long xen_max_p2m_pfn __read_mostly;

#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
#define P2M_MID_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long *))
#define P2M_TOP_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long **))

#define MAX_P2M_PFN		(P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)

/* Placeholders for holes in the address space */
static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);

static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);

RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_P2M_PFN);
	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
}

static inline unsigned p2m_mid_index(unsigned long pfn)
{
	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_PER_PAGE;
}
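
/*
 * Illustrative sketch (not part of the original file): how a pfn
 * decomposes under the three helpers above, using the 64-bit sizes
 * (P2M_PER_PAGE = P2M_MID_PER_PAGE = P2M_TOP_PER_PAGE = 512, so
 * MAX_P2M_PFN = 512^3 = 2^27 pages, i.e. 512GB of pseudo-physical
 * address space).
 */
#if 0	/* example only, not compiled */
#include <stdio.h>

#define PER_PAGE 512UL	/* stands in for P2M_*_PER_PAGE on 64-bit */

int main(void)
{
	unsigned long pfn = 0x123456;	/* arbitrary sample pfn */
	unsigned long top = pfn / (PER_PAGE * PER_PAGE);	/* 4 */
	unsigned long mid = (pfn / PER_PAGE) % PER_PAGE;	/* 282 */
	unsigned long idx = pfn % PER_PAGE;			/* 86 */

	/* recombining the indices gives back the original pfn */
	printf("%#lx\n", (top * PER_PAGE + mid) * PER_PAGE + idx);
	return 0;
}
#endif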

static void p2m_top_init(unsigned long ***top)
{
	unsigned i;

	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
		top[i] = p2m_mid_missing;
}

static void p2m_top_mfn_init(unsigned long *top)
{
	unsigned i;

	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
		top[i] = virt_to_mfn(p2m_mid_missing_mfn);
}

static void p2m_mid_init(unsigned long **mid)
{
	unsigned i;

	for (i = 0; i < P2M_MID_PER_PAGE; i++)
		mid[i] = p2m_missing;
}

static void p2m_mid_mfn_init(unsigned long *mid)
{
	unsigned i;

	for (i = 0; i < P2M_MID_PER_PAGE; i++)
		mid[i] = virt_to_mfn(p2m_missing);
}

static void p2m_init(unsigned long *p2m)
{
	unsigned i;

	/* a leaf page holds P2M_PER_PAGE entries (same count as the
	   mid level, but this is the semantically correct bound) */
	for (i = 0; i < P2M_PER_PAGE; i++)
		p2m[i] = INVALID_P2M_ENTRY;
}

/*
 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
 *
 * This is called both at boot time, and after resuming from suspend:
 * - At boot time we're called very early, and must use extend_brk()
 *   to allocate memory.
 *
 * - After resume we're called from within stop_machine, but the mfn
 *   tree should already be completely allocated.
 */
void xen_build_mfn_list_list(void)
{
	unsigned pfn;

	/* Pre-initialize p2m_top_mfn to be completely missing */
	if (p2m_top_mfn == NULL) {
		p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
		p2m_mid_mfn_init(p2m_mid_missing_mfn);

		p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
		p2m_top_mfn_init(p2m_top_mfn);
	}

	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);
		unsigned mididx = p2m_mid_index(pfn);
		unsigned long **mid;
		unsigned long mid_mfn;
		unsigned long *mid_mfn_p;

		mid = p2m_top[topidx];

		/* Don't bother allocating any mfn mid levels if
		   they're just missing */
		if (mid[mididx] == p2m_missing)
			continue;

		mid_mfn = p2m_top_mfn[topidx];
		mid_mfn_p = mfn_to_virt(mid_mfn);

		if (mid_mfn_p == p2m_mid_missing_mfn) {
			/*
			 * XXX boot-time only! We should never find
			 * missing parts of the mfn tree after
			 * runtime. extend_brk() will BUG if we call
			 * it too late.
			 */
			mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
			p2m_mid_mfn_init(mid_mfn_p);

			mid_mfn = virt_to_mfn(mid_mfn_p);

			p2m_top_mfn[topidx] = mid_mfn;
		}

		mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
	}
}

void xen_setup_mfn_list_list(void)
{
	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn);
	HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	xen_max_p2m_pfn = max_pfn;

	p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
	p2m_init(p2m_missing);

	p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
	p2m_mid_init(p2m_mid_missing);

	p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
	p2m_top_init(p2m_top);

	/*
	 * The domain builder gives us a pre-constructed p2m array in
	 * mfn_list for all the pages initially given to us, so we just
	 * need to graft that into our tree structure.
	 */
	for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);
		unsigned mididx = p2m_mid_index(pfn);

		if (p2m_top[topidx] == p2m_mid_missing) {
			unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
			p2m_mid_init(mid);

			p2m_top[topidx] = mid;
		}

		p2m_top[topidx][mididx] = &mfn_list[pfn];
	}
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, mididx, idx;

	if (unlikely(pfn >= MAX_P2M_PFN))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	mididx = p2m_mid_index(pfn);
	idx = p2m_index(pfn);

	return p2m_top[topidx][mididx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);

static void *alloc_p2m_page(void)
{
	return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
}

static void free_p2m_page(void *p)
{
	free_page((unsigned long)p);
}
/*
 * Fully allocate the p2m structure for a given pfn. We need to check
 * that both the top and mid levels are allocated, and make sure the
 * parallel mfn tree is kept in sync. We may race with other cpus, so
 * the new pages are installed with cmpxchg; if we lose the race then
 * simply free the page we allocated and use the one that's there.
 */
static bool alloc_p2m(unsigned long pfn)
{
	unsigned topidx, mididx;
	unsigned long ***top_p, **mid;
	unsigned long *top_mfn_p, *mid_mfn;

	topidx = p2m_top_index(pfn);
	mididx = p2m_mid_index(pfn);

	top_p = &p2m_top[topidx];
	mid = *top_p;

	if (mid == p2m_mid_missing) {
		/* Mid level is missing, allocate a new one */
		mid = alloc_p2m_page();
		if (!mid)
			return false;

		p2m_mid_init(mid);

		if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
			free_p2m_page(mid);
	}

	top_mfn_p = &p2m_top_mfn[topidx];
	mid_mfn = mfn_to_virt(*top_mfn_p);

	if (mid_mfn == p2m_mid_missing_mfn) {
		/* Separately check the mid mfn level */
		unsigned long missing_mfn;
		unsigned long mid_mfn_mfn;

		mid_mfn = alloc_p2m_page();
		if (!mid_mfn)
			return false;

		p2m_mid_mfn_init(mid_mfn);

		missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
		mid_mfn_mfn = virt_to_mfn(mid_mfn);
		if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
			free_p2m_page(mid_mfn);
	}

	if (p2m_top[topidx][mididx] == p2m_missing) {
		/* p2m leaf page is missing */
		unsigned long *p2m;

		p2m = alloc_p2m_page();
		if (!p2m)
			return false;

		p2m_init(p2m);

		if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
			free_p2m_page(p2m);
		else
			mid_mfn[mididx] = virt_to_mfn(p2m);
	}

	return true;
}

/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, mididx, idx;

	if (unlikely(pfn >= MAX_P2M_PFN)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return true;
	}

	topidx = p2m_top_index(pfn);
	mididx = p2m_mid_index(pfn);
	idx = p2m_index(pfn);

	if (p2m_top[topidx][mididx] == p2m_missing)
		return mfn == INVALID_P2M_ENTRY;

	p2m_top[topidx][mididx][idx] = mfn;

	return true;
}

bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return true;
	}

	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		if (!alloc_p2m(pfn))
			return false;

		if (!__set_phys_to_machine(pfn, mfn))
			return false;
	}

	return true;
}

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
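
/*
 * Illustrative usage sketch (not from the original file): how callers
 * in this file turn a kernel pointer into the machine address a
 * hypercall wants. "example_queue_update" is a made-up name, and
 * xen_extend_mmu_update() is only defined further down in this file;
 * the pattern mirrors xen_set_pmd_hyper() below.
 */
#if 0	/* example only, not compiled */
static void example_queue_update(pte_t *ptep, pte_t val)
{
	struct mmu_update u;

	xen_mc_batch();

	/* works for lowmem, vmalloc'd and kmap'd pointers alike */
	u.ptr = arbitrary_virt_to_machine(ptep).maddr;
	u.val = pte_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif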

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

static bool xen_iomap_pte(pte_t pte)
{
	return pte_flags(pte) & _PAGE_IOMAP;
}

static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = arbitrary_virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_IO);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		ADD_STATS(mmu_update_extended, 1);
		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);

		mcs.mc->args[1]++;

		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
		else
			ADD_STATS(mmu_update_histo[0], 1);
	} else {
		ADD_STATS(mmu_update, 1);
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
		ADD_STATS(mmu_update_histo[1], 1);
	}

	u = mcs.args;
	*u = *update;
}

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	ADD_STATS(pmd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pmd_update_pinned, 1);

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	if (xen_iomap_pte(pteval)) {
		xen_set_iomap_pte(ptep, pteval);
		goto out;
	}

	ADD_STATS(set_pte_at, 1);
//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
	ADD_STATS(set_pte_at_current, mm == current->mm);
	ADD_STATS(set_pte_at_kernel, mm == &init_mm);

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			ADD_STATS(set_pte_at_batched, 1);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:	return;
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is. We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	ADD_STATS(prot_commit, 1);
	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn = pfn_to_mfn(pfn);

		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte. Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		}

		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}
static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		/* We assume the pte frame number is an MFN, so
		   just use it as-is. */
		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;

	/* If this is a WC pte, convert back from Xen WC to Linux WC */
	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
		WARN_ON(!pat_enabled);
		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
	}

	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
		return pteval;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

/*
 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
 * are reserved for now, to correspond to the Intel-reserved PAT
 * types.
 *
 * We expect Linux's PAT set as follows:
 *
 * Idx  PTE flags        Linux    Xen    Default
 * 0                     WB       WB     WB
 * 1            PWT      WC       WT     WT
 * 2        PCD          UC-      UC-    UC-
 * 3        PCD PWT      UC       UC     UC
 * 4    PAT              WB       WC     WB
 * 5    PAT     PWT      WC       WP     WT
 * 6    PAT PCD          UC-      UC     UC-
 * 7    PAT PCD PWT      UC       UC     UC
 */

void xen_set_pat(u64 pat)
{
	/* We expect Linux to use a PAT setting of
	 * UC UC- WC WB (ignoring the PAT flag) */
	WARN_ON(pat != 0x0007010600070106ull);
}
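
/*
 * Illustrative sketch (not from the original file): the Idx column in
 * the table above is just the three pte bits read as a 3-bit number,
 * idx = PAT<<2 | PCD<<1 | PWT. That is how xen_pte_val() above and
 * xen_make_pte() below tell Linux WC (idx 1, PWT) and Xen WC (idx 4,
 * PAT) apart.
 */
#if 0	/* example only, not compiled */
#include <stdio.h>

#define X86_PWT (1 << 3)	/* _PAGE_PWT bit position */
#define X86_PCD (1 << 4)	/* _PAGE_PCD bit position */
#define X86_PAT (1 << 7)	/* _PAGE_PAT bit position (4k ptes) */

static int pat_index(unsigned long pte)
{
	return (!!(pte & X86_PAT) << 2) |
	       (!!(pte & X86_PCD) << 1) |
		!!(pte & X86_PWT);
}

int main(void)
{
	printf("%d %d\n", pat_index(X86_PWT), pat_index(X86_PAT)); /* 1 4 */
	return 0;
}
#endif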

pte_t xen_make_pte(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);

	/* If Linux is trying to set a WC pte, then map to the Xen WC.
	 * If _PAGE_PAT is set, then it probably means it is really
	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
	 * things work out OK...
	 *
	 * (We should never see kernel mappings with _PAGE_PSE set,
	 * but we could see hugetlbfs mappings, I think.)
	 */
	if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
	}

	/*
	 * Unprivileged domains are allowed to do IOMAPpings for
	 * PCI passthrough, but not map ISA space. The ISA
	 * mappings are just dummy local mappings to keep other
	 * parts of the kernel happy.
	 */
	if (unlikely(pte & _PAGE_IOMAP) &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
	} else {
		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
	}

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	ADD_STATS(pud_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pud_update_pinned, 1);

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	if (xen_iomap_pte(pte)) {
		xen_set_iomap_pte(ptep, pte);
		return;
	}

	ADD_STATS(pte_update, 1);
//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	if (xen_iomap_pte(pte)) {
		xen_set_iomap_pte(ptep, pte);
		return;
	}

	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure. This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	ADD_STATS(pgd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	ADD_STATS(pgd_update_pinned, 1);
	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker. This one is intended for pinning a
 * pagetable. This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level. It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit. In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings. On 32-bit these
	 * will end up making a zero-sized hole and so is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it. Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it. If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits). The solution is to mark RO and pin each PTE
		 * page while holding the lock. This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet. We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}
1306
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001307/*
1308 * On save, we need to pin all pagetables to make sure they get their
1309 * mfns turned into pfns. Search the list for any unpinned pgds and pin
1310 * them (unpinned pgds are not currently in use, probably because the
1311 * process is under construction or destruction).
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001312 *
1313 * Expected to be called in stop_machine() ("equivalent to taking
1314 * every spinlock in the system"), so the locking doesn't really
1315 * matter all that much.
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001316 */
1317void xen_mm_pin_all(void)
1318{
1319 unsigned long flags;
1320 struct page *page;
1321
1322 spin_lock_irqsave(&pgd_lock, flags);
1323
1324 list_for_each_entry(page, &pgd_list, lru) {
1325 if (!PagePinned(page)) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001326 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001327 SetPageSavePinned(page);
1328 }
1329 }
1330
1331 spin_unlock_irqrestore(&pgd_lock, flags);
1332}
1333
/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must hold the lock while the pte page
		 * is unpinned but still RO, to prevent concurrent
		 * updates from seeing it in this partially-pinned
		 * state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

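/*
 * A pagetable is pinned the first time its mm is put to use: when it
 * becomes the active mm, or when it is duplicated by fork.  Taking
 * page_table_lock keeps the pin from racing with other updates to the
 * pagetable.
 */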
void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = percpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
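	/*
	 * We can't sleep here, so the scratch cpumask is allocated
	 * with GFP_ATOMIC.  If that fails, fall back to IPIing each
	 * candidate cpu one at a time instead of batching them with
	 * smp_call_function_many().
	 */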
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetable, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

static __init void xen_pagetable_setup_start(pgd_t *base)
{
}

static void xen_post_allocator_init(void);

static __init void xen_pagetable_setup_done(pgd_t *base)
{
	xen_setup_shared_info();
	xen_post_allocator_init();
}

static void xen_write_cr2(unsigned long cr2)
{
	percpu_read(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
	return percpu_read(xen_vcpu)->arch.cr2;
}

unsigned long xen_read_cr2_direct(void)
{
	return percpu_read(xen_vcpu_info.arch.cr2);
}

static void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

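/*
 * Remote flushes are done with one MMUEXT_TLB_FLUSH_MULTI or
 * MMUEXT_INVLPG_MULTI hypercall instead of cross-cpu IPIs.  The target
 * vcpumask lives in the multicall argument space, so it stays valid
 * until the batch is actually issued.
 */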
static void xen_flush_tlb_others(const struct cpumask *cpus,
				 struct mm_struct *mm, unsigned long va)
{
	struct {
		struct mmuext_op op;
		DECLARE_BITMAP(mask, NR_CPUS);
	} *args;
	struct multicall_space mcs;

	if (cpumask_empty(cpus))
		return;		/* nothing to do */

	mcs = xen_mc_entry(sizeof(*args));
	args = mcs.args;
	args->op.arg2.vcpumask = to_cpumask(args->mask);

	/* Remove us, and any offline CPUS. */
	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));

	if (va == TLB_FLUSH_ALL) {
		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	} else {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = va;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static unsigned long xen_read_cr3(void)
{
	return percpu_read(xen_cr3);
}

static void set_current_cr3(void *v)
{
	percpu_write(xen_current_cr3, (unsigned long)v);
}

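/*
 * Queue a new base pagetable as part of the current multicall batch:
 * MMUEXT_NEW_BASEPTR for the kernel pagetable, MMUEXT_NEW_USER_BASEPTR
 * for the 64-bit user one.  xen_current_cr3 is only updated once the
 * batch has really been submitted.
 */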
static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
	struct mmuext_op *op;
	struct multicall_space mcs;
	unsigned long mfn;

	if (cr3)
		mfn = pfn_to_mfn(PFN_DOWN(cr3));
	else
		mfn = 0;

	WARN_ON(mfn == 0 && kernel);

	mcs = __xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
	op->arg1.mfn = mfn;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	if (kernel) {
		percpu_write(xen_cr3, cr3);

		/* Update xen_current_cr3 once the batch has actually
		   been submitted. */
		xen_mc_callback(set_current_cr3, (void *)cr3);
	}
}

static void xen_write_cr3(unsigned long cr3)
{
	BUG_ON(preemptible());

	xen_mc_batch();		/* disables interrupts */

	/* Update while interrupts are disabled, so it's atomic with
	   respect to ipis */
	percpu_write(xen_cr3, cr3);

	__xen_write_cr3(true, cr3);

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
		if (user_pgd)
			__xen_write_cr3(false, __pa(user_pgd));
		else
			__xen_write_cr3(false, 0);
	}
#endif

	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
}

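/*
 * On 64-bit, each pgd has a shadow user pgd, stashed in the pgd page's
 * ->private field.  It carries the vsyscall mapping via
 * level3_user_vsyscall, and is what MMUEXT_NEW_USER_BASEPTR points at.
 */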
static int xen_pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = mm->pgd;
	int ret = 0;

	BUG_ON(PagePinned(virt_to_page(pgd)));

#ifdef CONFIG_X86_64
	{
		struct page *page = virt_to_page(pgd);
		pgd_t *user_pgd;

		BUG_ON(page->private != 0);

		ret = -ENOMEM;

		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		page->private = (unsigned long)user_pgd;

		if (user_pgd != NULL) {
			user_pgd[pgd_index(VSYSCALL_START)] =
				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
			ret = 0;
		}

		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
	}
#endif

	return ret;
}

static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	if (user_pgd)
		free_page((unsigned long)user_pgd);
#endif
}

#ifdef CONFIG_X86_32
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}

/* Init-time set_pte while constructing initial pagetables, which
   doesn't allow RO pagetable pages to be remapped RW */
static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	pte = mask_rw_pte(ptep, pte);

	xen_set_pte(ptep, pte);
}
#endif

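/* Issue a single, synchronous pin/unpin op, outside the multicall machinery. */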
static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;
	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}

/* Used for pmd and pud */
static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Early release_pte assumes that all pts are pinned, since there's
   only init_mm and anything attached to that is pinned. */
static __init void xen_release_pte_init(unsigned long pfn)
{
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static __init void xen_release_pmd_init(unsigned long pfn)
{
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(virt_to_page(mm->pgd))) {
		SetPagePinned(page);

		if (!PageHighMem(page)) {
			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
		} else {
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
		}
	}
}

static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PMD);
}

/* This should never happen until we're OK to use struct page */
static void xen_release_ptpage(unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(page)) {
		if (!PageHighMem(page)) {
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
		}
		ClearPagePinned(page);
	}
}

static void xen_release_pte(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PMD);
}

#if PAGETABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PUD);
}
#endif

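/*
 * On 32-bit the hypervisor occupies the top of the virtual address
 * space.  Ask Xen where that region starts (falling back to the
 * compile-time HYPERVISOR_VIRT_START) and carve it out of the kernel's
 * address space with reserve_top_address().
 */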
void __init xen_reserve_top(void)
{
#ifdef CONFIG_X86_32
	unsigned long top = HYPERVISOR_VIRT_START;
	struct xen_platform_parameters pp;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
		top = pp.virt_start;

	reserve_top_address(-top);
#endif	/* CONFIG_X86_32 */
}

/*
 * Like __va(), but returns the address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void *__ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
	return (void *)(paddr + __START_KERNEL_map);
#else
	return __va(paddr);
#endif
}

/* Convert a machine address to physical address */
static unsigned long m2p(phys_addr_t maddr)
{
	phys_addr_t paddr;

	maddr &= PTE_PFN_MASK;
	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

	return paddr;
}

/* Convert a machine address to kernel virtual */
static void *m2v(phys_addr_t maddr)
{
	return __ka(m2p(maddr));
}

static void set_page_prot(void *addr, pgprot_t prot)
{
	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
	pte_t pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
		BUG();
}

static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
	unsigned pmdidx, pteidx;
	unsigned ident_pte;
	unsigned long pfn;

	level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
				      PAGE_SIZE);

	ident_pte = 0;
	pfn = 0;
	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
		pte_t *pte_page;

		/* Reuse or allocate a page of ptes */
		if (pmd_present(pmd[pmdidx]))
			pte_page = m2v(pmd[pmdidx].pmd);
		else {
			/* Check for free pte pages */
			if (ident_pte == LEVEL1_IDENT_ENTRIES)
				break;

			pte_page = &level1_ident_pgt[ident_pte];
			ident_pte += PTRS_PER_PTE;

			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
		}

		/* Install mappings */
		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
			pte_t pte;

			if (pfn > max_pfn_mapped)
				max_pfn_mapped = pfn;

			if (!pte_none(pte_page[pteidx]))
				continue;

			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
			pte_page[pteidx] = pte;
		}
	}

	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);

	set_page_prot(pmd, PAGE_KERNEL_RO);
}

#ifdef CONFIG_X86_64
static void convert_pfn_mfn(void *v)
{
	pte_t *pte = v;
	int i;

	/* All levels are converted the same way, so just treat them
	   as ptes. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte[i] = xen_make_pte(pte[i].pte);
}

/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen-provided pagetable into
 * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
 * means that only the kernel has a physical mapping to start with -
 * but that's enough to get __va working.  We need to fill in the rest
 * of the physical mapping once some sort of allocator has been set
 * up.
 */
__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
					 unsigned long max_pfn)
{
	pud_t *l3;
	pmd_t *l2;

	/* Zap identity mapping */
	init_level4_pgt[0] = __pgd(0);

	/* Pre-constructed entries are in pfn, so convert to mfn */
	convert_pfn_mfn(init_level4_pgt);
	convert_pfn_mfn(level3_ident_pgt);
	convert_pfn_mfn(level3_kernel_pgt);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);

	memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
	memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
	memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	/* Set up identity map */
	xen_map_identity_early(level2_ident_pgt, max_pfn);

	/* Make pagetable pieces RO */
	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);

	/* Pin down new L4 */
	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
			  PFN_DOWN(__pa_symbol(init_level4_pgt)));

	/* Unpin Xen-provided one */
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	/* Switch over */
	pgd = init_level4_pgt;

	/*
	 * At this stage there can be no user pgd, and no page
	 * structure to attach it to, so make sure we just set the
	 * kernel pgd.
	 */
	xen_mc_batch();
	__xen_write_cr3(true, __pa(pgd));
	xen_mc_issue(PARAVIRT_LAZY_CPU);

	reserve_early(__pa(xen_start_info->pt_base),
		      __pa(xen_start_info->pt_base +
			   xen_start_info->nr_pt_frames * PAGE_SIZE),
		      "XEN PAGETABLES");

	return pgd;
}
#else	/* !CONFIG_X86_64 */
static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD);

__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
					 unsigned long max_pfn)
{
	pmd_t *kernel_pmd;

	level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);

	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
				  xen_start_info->nr_pt_frames * PAGE_SIZE +
				  512*1024);

	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);

	xen_map_identity_early(level2_kernel_pgt, max_pfn);

	memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
	set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
		__pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));

	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	xen_write_cr3(__pa(swapper_pg_dir));

	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));

	reserve_early(__pa(xen_start_info->pt_base),
		      __pa(xen_start_info->pt_base +
			   xen_start_info->nr_pt_frames * PAGE_SIZE),
		      "XEN PAGETABLES");

	return swapper_pg_dir;
}
#endif	/* CONFIG_X86_64 */

static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
	pte_t pte;

	phys >>= PAGE_SHIFT;

	switch (idx) {
	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_F00F_BUG
	case FIX_F00F_IDT:
#endif
#ifdef CONFIG_X86_32
	case FIX_WP_TEST:
	case FIX_VDSO:
# ifdef CONFIG_HIGHMEM
	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
# endif
#else
	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	case FIX_APIC_BASE:	/* maps dummy local APIC */
#endif
	case FIX_TEXT_POKE0:
	case FIX_TEXT_POKE1:
		/* All local page mappings */
		pte = pfn_pte(phys, prot);
		break;

	case FIX_PARAVIRT_BOOTMAP:
		/* This is an MFN, but it isn't an IO mapping from the
		   IO domain */
		pte = mfn_pte(phys, prot);
		break;

	default:
		/* By default, set_fixmap is used for hardware mappings */
		pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
		break;
	}

	__native_set_fixmap(idx, pte);

#ifdef CONFIG_X86_64
	/* Replicate changes to map the vsyscall page into the user
	   pagetable vsyscall mapping. */
	if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
		unsigned long vaddr = __fix_to_virt(idx);
		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
	}
#endif
}

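/*
 * Once the real allocator is up (and struct pages exist), switch from
 * the boot-time pagetable hooks installed in xen_mmu_ops to the normal
 * ones, and record that everything built so far is pinned.
 */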
static __init void xen_post_allocator_init(void)
{
	pv_mmu_ops.set_pte = xen_set_pte;
	pv_mmu_ops.set_pmd = xen_set_pmd;
	pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.set_pgd = xen_set_pgd;
#endif

	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	pv_mmu_ops.alloc_pte = xen_alloc_pte;
	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
	pv_mmu_ops.release_pte = xen_release_pte;
	pv_mmu_ops.release_pmd = xen_release_pmd;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.alloc_pud = xen_alloc_pud;
	pv_mmu_ops.release_pud = xen_release_pud;
#endif

#ifdef CONFIG_X86_64
	SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
	xen_mark_init_mm_pinned();
}

static void xen_leave_lazy_mmu(void)
{
	preempt_disable();
	xen_mc_flush();
	paravirt_leave_lazy_mmu();
	preempt_enable();
}

static const struct pv_mmu_ops xen_mmu_ops __initdata = {
	.read_cr2 = xen_read_cr2,
	.write_cr2 = xen_write_cr2,

	.read_cr3 = xen_read_cr3,
	.write_cr3 = xen_write_cr3,

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,
	.flush_tlb_others = xen_flush_tlb_others,

	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,

	.pgd_alloc = xen_pgd_alloc,
	.pgd_free = xen_pgd_free,

	.alloc_pte = xen_alloc_pte_init,
	.release_pte = xen_release_pte_init,
	.alloc_pmd = xen_alloc_pmd_init,
	.alloc_pmd_clone = paravirt_nop,
	.release_pmd = xen_release_pmd_init,

#ifdef CONFIG_X86_64
	.set_pte = xen_set_pte,
#else
	.set_pte = xen_set_pte_init,
#endif
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd_hyper,

	.ptep_modify_prot_start = __ptep_modify_prot_start,
	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,
#endif	/* CONFIG_X86_PAE */
	.set_pud = xen_set_pud_hyper,

	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

#if PAGETABLE_LEVELS == 4
	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
	.set_pgd = xen_set_pgd_hyper,

	.alloc_pud = xen_alloc_pmd_init,
	.release_pud = xen_release_pmd_init,
#endif	/* PAGETABLE_LEVELS == 4 */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_mmu,
		.leave = xen_leave_lazy_mmu,
	},

	.set_fixmap = xen_set_fixmap,
};

void __init xen_init_mmu_ops(void)
{
	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
	pv_mmu_ops = xen_mmu_ops;

	vmap_lazy_unmap = false;
}

/* Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER 9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];

#define VOID_PTE (mfn_pte(0, __pgprot(0)))
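/*
 * Unmap 2^order pages starting at vaddr, recording the old MFNs in
 * in_frames and/or the backing PFNs in out_frames so the caller can
 * hand them to XENMEM_exchange.  The p2m entries are invalidated while
 * the exchange is in flight.
 */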
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
			      unsigned long *in_frames,
			      unsigned long *out_frames)
{
	int i;
	struct multicall_space mcs;

	xen_mc_batch();
	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
		mcs = __xen_mc_entry(0);

		if (in_frames)
			in_frames[i] = virt_to_mfn(vaddr);

		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
		set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);

		if (out_frames)
			out_frames[i] = virt_to_pfn(vaddr);
	}
	xen_mc_issue(0);
}

/*
 * Update the pfn-to-mfn mappings for a virtual address range, either to
 * point to an array of mfns, or contiguously from a single starting
 * mfn.
 */
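/*
 * Only the final update in the batch carries UVMF_* flush flags, so the
 * whole range is remapped with at most one TLB flush.
 */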
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
				     unsigned long *mfns,
				     unsigned long first_mfn)
{
	unsigned i, limit;
	unsigned long mfn;

	xen_mc_batch();

	limit = 1u << order;
	for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
		struct multicall_space mcs;
		unsigned flags;

		mcs = __xen_mc_entry(0);
		if (mfns)
			mfn = mfns[i];
		else
			mfn = first_mfn + i;

		if (i < (limit - 1))
			flags = 0;
		else {
			if (order == 0)
				flags = UVMF_INVLPG | UVMF_ALL;
			else
				flags = UVMF_TLB_FLUSH | UVMF_ALL;
		}

		MULTI_update_va_mapping(mcs.mc, vaddr,
					mfn_pte(mfn, PAGE_KERNEL), flags);

		set_phys_to_machine(virt_to_pfn(vaddr), mfn);
	}

	xen_mc_issue(0);
}

/*
 * Perform the hypercall to exchange a region of our pfns to point to
 * memory with the required contiguous alignment.  Takes the pfns as
 * input, and populates mfns as output.
 *
 * Returns a success code indicating whether the hypervisor was able to
 * satisfy the request or not.
 */
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
			       unsigned long *pfns_in,
			       unsigned long extents_out,
			       unsigned int order_out,
			       unsigned long *mfns_out,
			       unsigned int address_bits)
{
	long rc;
	int success;

	struct xen_memory_exchange exchange = {
		.in = {
			.nr_extents   = extents_in,
			.extent_order = order_in,
			.extent_start = pfns_in,
			.domid        = DOMID_SELF
		},
		.out = {
			.nr_extents   = extents_out,
			.extent_order = order_out,
			.extent_start = mfns_out,
			.address_bits = address_bits,
			.domid        = DOMID_SELF
		}
	};

	BUG_ON(extents_in << order_in != extents_out << order_out);

	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
	success = (exchange.nr_exchanged == extents_in);

	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
	BUG_ON(success && (rc != 0));

	return success;
}

int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
				 unsigned int address_bits)
{
	unsigned long *in_frames = discontig_frames, out_frame;
	unsigned long  flags;
	int            success;

	/*
	 * Currently an auto-translated guest will not perform I/O, nor will
	 * it require PAE page directories below 4GB. Therefore any calls to
	 * this function are redundant and can be ignored.
	 */

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return -ENOMEM;

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Zap current PTEs, remembering MFNs. */
	xen_zap_pfn_range(vstart, order, in_frames, NULL);

	/* 2. Get a new contiguous memory extent. */
	out_frame = virt_to_pfn(vstart);
	success = xen_exchange_memory(1UL << order, 0, in_frames,
				      1, order, &out_frame,
				      address_bits);

	/* 3. Map the new extent in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
	else
		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	return success ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
{
	unsigned long *out_frames = discontig_frames, in_frame;
	unsigned long  flags;
	int success;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return;

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Find start MFN of contiguous extent. */
	in_frame = virt_to_mfn(vstart);

	/* 2. Zap current PTEs. */
	xen_zap_pfn_range(vstart, order, NULL, out_frames);

	/* 3. Do the exchange for non-contiguous MFNs. */
	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
				      0, out_frames, 0);

	/* 4. Map new pages in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
	else
		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

#ifdef CONFIG_XEN_PVHVM
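/*
 * HVMOP_pagetable_dying tells the hypervisor that a pagetable is about
 * to be torn down, so that (on hypervisors which support the op) any
 * shadow pagetable state for it can be dropped eagerly rather than
 * being unshadowed piecemeal.
 */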
static void xen_hvm_exit_mmap(struct mm_struct *mm)
{
	struct xen_hvm_pagetable_dying a;
	int rc;

	a.domid = DOMID_SELF;
	a.gpa = __pa(mm->pgd);
	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
	WARN_ON_ONCE(rc < 0);
}

static int is_pagetable_dying_supported(void)
{
	struct xen_hvm_pagetable_dying a;
	int rc = 0;

	a.domid = DOMID_SELF;
	a.gpa = 0x00;
	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
	if (rc < 0) {
		printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
		return 0;
	}
	return 1;
}

void __init xen_hvm_init_mmu_ops(void)
{
	if (is_pagetable_dying_supported())
		pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
}
#endif

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_mmu_debug;

static int __init xen_mmu_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);

	debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
	debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_pinned);
	debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_pinned);

	debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
	debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_pinned);
	debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_pinned);

	debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
	debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_pinned);
	debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_pinned);

	debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
//	debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
//			   &mmu_stats.pte_update_pinned);
	debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pte_update_pinned);

	debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
	debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
			   &mmu_stats.mmu_update_extended);
	xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
				     mmu_stats.mmu_update_histo, 20);

	debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
	debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_batched);
	debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_current);
	debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_kernel);

	debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
	debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
			   &mmu_stats.prot_commit_batched);

	return 0;
}
fs_initcall(xen_mmu_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */