/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
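
/*
 * A minimal sketch of the conversion described above (illustrative
 * only; pte_pfn_to_mfn()/pte_mfn_to_pfn() further down in this file
 * are the real implementations).  For a present pte value:
 *
 *      pfn   = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 *      flags = val & PTE_FLAGS_MASK;
 *      val   = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
 *
 * and reading back swaps in mfn_to_pfn() to recover the pfn.
 */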
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

#define MMU_UPDATE_HISTO        30

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and driver_pages, and
 * balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

#ifdef CONFIG_XEN_DEBUG_FS

static struct {
        u32 pgd_update;
        u32 pgd_update_pinned;
        u32 pgd_update_batched;

        u32 pud_update;
        u32 pud_update_pinned;
        u32 pud_update_batched;

        u32 pmd_update;
        u32 pmd_update_pinned;
        u32 pmd_update_batched;

        u32 pte_update;
        u32 pte_update_pinned;
        u32 pte_update_batched;

        u32 mmu_update;
        u32 mmu_update_extended;
        u32 mmu_update_histo[MMU_UPDATE_HISTO];

        u32 prot_commit;
        u32 prot_commit_batched;

        u32 set_pte_at;
        u32 set_pte_at_batched;
        u32 set_pte_at_pinned;
        u32 set_pte_at_current;
        u32 set_pte_at_kernel;
} mmu_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
        if (unlikely(zero_stats)) {
                memset(&mmu_stats, 0, sizeof(mmu_stats));
                zero_stats = 0;
        }
}

#define ADD_STATS(elem, val)                    \
        do { check_zero(); mmu_stats.elem += (val); } while(0)

#else  /* !CONFIG_XEN_DEBUG_FS */

#define ADD_STATS(elem, val)    do { (void)(val); } while(0)

#endif /* CONFIG_XEN_DEBUG_FS */


/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to hold the page-table pages used to map the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES    (PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);          /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);  /* actual vcpu cr3 */
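
/*
 * A sketch of the intended access pattern (assumed from the comment
 * above, not a new interface): a vcpu inspecting its own pagetable
 * base may read its own xen_cr3, but a cross-vcpu reader should use
 *
 *      unsigned long cr3 = per_cpu(xen_current_cr3, cpu);
 *
 * since the other vcpu's xen_cr3 update may still be lazily deferred.
 */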


/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT      ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

/*
 * Xen leaves the responsibility for maintaining p2m mappings to the
 * guests themselves, but it must also access and update the p2m array
 * during suspend/resume when all the pages are reallocated.
 *
 * The p2m table is logically a flat array, but we implement it as a
 * three-level tree to allow the address space to be sparse.
 *
 *                               Xen
 *                                |
 *     p2m_top              p2m_top_mfn
 *        /  \                 /    \
 * p2m_mid p2m_mid        p2m_mid_mfn p2m_mid_mfn
 *    / \    / \            /          /
 *  p2m p2m p2m p2m p2m p2m p2m ...
 *
 * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
 *
 * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
 * maximum representable pseudo-physical address space is:
 *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
 *
 * P2M_PER_PAGE depends on the architecture, as an mfn is always
 * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to
 * 512 and 1024 entries respectively.
 */

unsigned long xen_max_p2m_pfn __read_mostly;

#define P2M_PER_PAGE            (PAGE_SIZE / sizeof(unsigned long))
#define P2M_MID_PER_PAGE        (PAGE_SIZE / sizeof(unsigned long *))
#define P2M_TOP_PER_PAGE        (PAGE_SIZE / sizeof(unsigned long **))

#define MAX_P2M_PFN             (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)

/* Placeholders for holes in the address space */
static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);

static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);

RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));

static inline unsigned p2m_top_index(unsigned long pfn)
{
        BUG_ON(pfn >= MAX_P2M_PFN);
        return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
}

static inline unsigned p2m_mid_index(unsigned long pfn)
{
        return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
        return pfn % P2M_PER_PAGE;
}
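
/*
 * Worked example of the index split above, assuming 64-bit
 * (P2M_PER_PAGE == P2M_MID_PER_PAGE == 512):
 *
 *      pfn = 0x12345 (74565)
 *      p2m_top_index(pfn) = 74565 / (512 * 512)  = 0
 *      p2m_mid_index(pfn) = (74565 / 512) % 512  = 145
 *      p2m_index(pfn)     = 74565 % 512          = 325
 *
 * so that pfn's mfn lives at p2m_top[0][145][325].  Each mid page
 * thus spans 512 * 512 pages (1GB with 4k pages), which is also the
 * unit the RESERVE_BRK() sizing above is computed in.
 */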

static void p2m_top_init(unsigned long ***top)
{
        unsigned i;

        for (i = 0; i < P2M_TOP_PER_PAGE; i++)
                top[i] = p2m_mid_missing;
}

static void p2m_top_mfn_init(unsigned long *top)
{
        unsigned i;

        for (i = 0; i < P2M_TOP_PER_PAGE; i++)
                top[i] = virt_to_mfn(p2m_mid_missing_mfn);
}

static void p2m_top_mfn_p_init(unsigned long **top)
{
        unsigned i;

        for (i = 0; i < P2M_TOP_PER_PAGE; i++)
                top[i] = p2m_mid_missing_mfn;
}

static void p2m_mid_init(unsigned long **mid)
{
        unsigned i;

        for (i = 0; i < P2M_MID_PER_PAGE; i++)
                mid[i] = p2m_missing;
}

static void p2m_mid_mfn_init(unsigned long *mid)
{
        unsigned i;

        for (i = 0; i < P2M_MID_PER_PAGE; i++)
                mid[i] = virt_to_mfn(p2m_missing);
}

static void p2m_init(unsigned long *p2m)
{
        unsigned i;

        /* A p2m leaf holds P2M_PER_PAGE entries (the previous bound,
           P2M_MID_PER_PAGE, is numerically identical on x86 but
           misleading here). */
        for (i = 0; i < P2M_PER_PAGE; i++)
                p2m[i] = INVALID_P2M_ENTRY;
}
/*
 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
 *
 * This is called both at boot time, and after resuming from suspend:
 * - At boot time we're called very early, and must use extend_brk()
 *   to allocate memory.
 *
 * - After resume we're called from within stop_machine, but the mfn
 *   tree should already be completely allocated.
 */
void xen_build_mfn_list_list(void)
{
        unsigned long pfn;

        /* Pre-initialize p2m_top_mfn to be completely missing */
        if (p2m_top_mfn == NULL) {
                p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
                p2m_mid_mfn_init(p2m_mid_missing_mfn);

                p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
                p2m_top_mfn_p_init(p2m_top_mfn_p);

                p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
                p2m_top_mfn_init(p2m_top_mfn);
        } else {
                /* Reinitialise; the mfns all change after migration */
                p2m_mid_mfn_init(p2m_mid_missing_mfn);
        }

        for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);
                unsigned mididx = p2m_mid_index(pfn);
                unsigned long **mid;
                unsigned long *mid_mfn_p;

                mid = p2m_top[topidx];
                mid_mfn_p = p2m_top_mfn_p[topidx];

                /* Don't bother allocating any mfn mid levels if
                 * they're just missing; just update the stored mfn,
                 * since all of them could have changed over a migrate.
                 */
                if (mid == p2m_mid_missing) {
                        BUG_ON(mididx);
                        BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
                        p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
                        pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
                        continue;
                }

                if (mid_mfn_p == p2m_mid_missing_mfn) {
                        /*
                         * XXX boot-time only!  We should never find
                         * missing parts of the mfn tree after
                         * runtime.  extend_brk() will BUG if we call
                         * it too late.
                         */
                        mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
                        p2m_mid_mfn_init(mid_mfn_p);

                        p2m_top_mfn_p[topidx] = mid_mfn_p;
                }

                p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
                mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
        }
}

void xen_setup_mfn_list_list(void)
{
        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

        HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                virt_to_mfn(p2m_top_mfn);
        HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
        unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
        unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
        unsigned long pfn;

        xen_max_p2m_pfn = max_pfn;

        p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
        p2m_init(p2m_missing);

        p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
        p2m_mid_init(p2m_mid_missing);

        p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
        p2m_top_init(p2m_top);

        /*
         * The domain builder gives us a pre-constructed p2m array in
         * mfn_list for all the pages initially given to us, so we just
         * need to graft that into our tree structure.
         */
        for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);
                unsigned mididx = p2m_mid_index(pfn);

                if (p2m_top[topidx] == p2m_mid_missing) {
                        unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
                        p2m_mid_init(mid);

                        p2m_top[topidx] = mid;
                }

                p2m_top[topidx][mididx] = &mfn_list[pfn];
        }
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
        unsigned topidx, mididx, idx;

        if (unlikely(pfn >= MAX_P2M_PFN))
                return INVALID_P2M_ENTRY;

        topidx = p2m_top_index(pfn);
        mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);

        return p2m_top[topidx][mididx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
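
/*
 * Typical lookup, as a sketch (most callers go through the
 * pfn_to_mfn() wrapper in asm/xen/page.h rather than calling this
 * directly):
 *
 *      unsigned long mfn = get_phys_to_machine(pfn);
 *      if (mfn == INVALID_P2M_ENTRY)
 *              ... the pfn has no machine frame backing it ...
 */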

static void *alloc_p2m_page(void)
{
        return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
}

static void free_p2m_page(void *p)
{
        free_page((unsigned long)p);
}

/*
 * Fully allocate the p2m structure for a given pfn.  We need to check
 * that both the top and mid levels are allocated, and make sure the
 * parallel mfn tree is kept in sync.  We may race with other cpus, so
 * the new pages are installed with cmpxchg; if we lose the race then
 * simply free the page we allocated and use the one that's there.
 */
static bool alloc_p2m(unsigned long pfn)
{
        unsigned topidx, mididx;
        unsigned long ***top_p, **mid;
        unsigned long *top_mfn_p, *mid_mfn;

        topidx = p2m_top_index(pfn);
        mididx = p2m_mid_index(pfn);

        top_p = &p2m_top[topidx];
        mid = *top_p;

        if (mid == p2m_mid_missing) {
                /* Mid level is missing, allocate a new one */
                mid = alloc_p2m_page();
                if (!mid)
                        return false;

                p2m_mid_init(mid);

                if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
                        free_p2m_page(mid);
        }

        top_mfn_p = &p2m_top_mfn[topidx];
        mid_mfn = p2m_top_mfn_p[topidx];

        BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);

        if (mid_mfn == p2m_mid_missing_mfn) {
                /* Separately check the mid mfn level */
                unsigned long missing_mfn;
                unsigned long mid_mfn_mfn;

                mid_mfn = alloc_p2m_page();
                if (!mid_mfn)
                        return false;

                p2m_mid_mfn_init(mid_mfn);

                missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
                mid_mfn_mfn = virt_to_mfn(mid_mfn);
                if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
                        free_p2m_page(mid_mfn);
                else
                        p2m_top_mfn_p[topidx] = mid_mfn;
        }

        if (p2m_top[topidx][mididx] == p2m_missing) {
                /* p2m leaf page is missing */
                unsigned long *p2m;

                p2m = alloc_p2m_page();
                if (!p2m)
                        return false;

                p2m_init(p2m);

                if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
                        free_p2m_page(p2m);
                else
                        mid_mfn[mididx] = virt_to_mfn(p2m);
        }

        return true;
}
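
/*
 * Each level above follows the same lock-free publish idiom; a
 * minimal sketch of the pattern ("slot" and "placeholder" are
 * hypothetical names, not real variables):
 *
 *      new = alloc_p2m_page();
 *      p2m_init(new);
 *      if (cmpxchg(slot, placeholder, new) != placeholder)
 *              free_p2m_page(new);     (another cpu won the race;
 *                                       use the page already there)
 */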

/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
        unsigned topidx, mididx, idx;

        if (unlikely(pfn >= MAX_P2M_PFN)) {
                BUG_ON(mfn != INVALID_P2M_ENTRY);
                return true;
        }

        topidx = p2m_top_index(pfn);
        mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);

        if (p2m_top[topidx][mididx] == p2m_missing)
                return mfn == INVALID_P2M_ENTRY;

        p2m_top[topidx][mididx][idx] = mfn;

        return true;
}

bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
        if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
                BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
                return true;
        }

        if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
                if (!alloc_p2m(pfn))
                        return false;

                if (!__set_phys_to_machine(pfn, mfn))
                        return false;
        }

        return true;
}

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
        xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

        return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
        unsigned long address = (unsigned long)vaddr;
        unsigned int level;
        pte_t *pte;
        unsigned offset;

        /*
         * if the PFN is in the linear mapped vaddr range, we can just use
         * the (quick) virt_to_machine() p2m lookup
         */
        if (virt_addr_valid(vaddr))
                return virt_to_machine(vaddr);

        /* otherwise we have to do a (slower) full page-table walk */

        pte = lookup_address(address, &level);
        BUG_ON(pte == NULL);
        offset = address & ~PAGE_MASK;
        return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
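
/*
 * Usage sketch: this is what lets the update paths below hand any
 * kernel virtual address to the hypervisor, e.g. (as done in
 * xen_set_pmd_hyper() later in this file):
 *
 *      u.ptr = arbitrary_virt_to_machine(ptr).maddr;
 *
 * It works both for lowmem addresses (fast p2m lookup) and for
 * vmalloc/ioremap addresses (full pagetable walk).
 */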

void make_lowmem_page_readonly(void *vaddr)
{
        pte_t *pte, ptev;
        unsigned long address = (unsigned long)vaddr;
        unsigned int level;

        pte = lookup_address(address, &level);
        if (pte == NULL)
                return;         /* vaddr missing */

        ptev = pte_wrprotect(*pte);

        if (HYPERVISOR_update_va_mapping(address, ptev, 0))
                BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
        pte_t *pte, ptev;
        unsigned long address = (unsigned long)vaddr;
        unsigned int level;

        pte = lookup_address(address, &level);
        if (pte == NULL)
                return;         /* vaddr missing */

        ptev = pte_mkwrite(*pte);

        if (HYPERVISOR_update_va_mapping(address, ptev, 0))
                BUG();
}


static bool xen_page_pinned(void *ptr)
{
        struct page *page = virt_to_page(ptr);

        return PagePinned(page);
}

static bool xen_iomap_pte(pte_t pte)
{
        return pte_flags(pte) & _PAGE_IOMAP;
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
        struct multicall_space mcs;
        struct mmu_update *u;

        mcs = xen_mc_entry(sizeof(*u));
        u = mcs.args;

        /* ptep might be kmapped when using 32-bit HIGHPTE */
        u->ptr = arbitrary_virt_to_machine(ptep).maddr;
        u->val = pte_val_ma(pteval);

        MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

        xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
{
        xen_set_domain_pte(ptep, pteval, DOMID_IO);
}

static void xen_extend_mmu_update(const struct mmu_update *update)
{
        struct multicall_space mcs;
        struct mmu_update *u;

        mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

        if (mcs.mc != NULL) {
                ADD_STATS(mmu_update_extended, 1);
                ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);

                mcs.mc->args[1]++;

                if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
                        ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
                else
                        ADD_STATS(mmu_update_histo[0], 1);
        } else {
                ADD_STATS(mmu_update, 1);
                mcs = __xen_mc_entry(sizeof(*u));
                MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
                ADD_STATS(mmu_update_histo[1], 1);
        }

        u = mcs.args;
        *u = *update;
}
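
/*
 * The net effect, as a sketch of the caller pattern used throughout
 * this file (not a new interface): consecutive updates issued inside
 * one batch collapse into a single mmu_update multicall:
 *
 *      xen_mc_batch();
 *      u.ptr = arbitrary_virt_to_machine(ptep).maddr;
 *      u.val = pte_val_ma(pte);
 *      xen_extend_mmu_update(&u);      (extends the previous
 *                                       mmu_update entry if possible)
 *      xen_mc_issue(PARAVIRT_LAZY_MMU);
 */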

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
        struct mmu_update u;

        preempt_disable();

        xen_mc_batch();

        /* ptr may be ioremapped for 64-bit pagetable setup */
        u.ptr = arbitrary_virt_to_machine(ptr).maddr;
        u.val = pmd_val_ma(val);
        xen_extend_mmu_update(&u);

        ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
        ADD_STATS(pmd_update, 1);

        /* If page is not pinned, we can just update the entry
           directly */
        if (!xen_page_pinned(ptr)) {
                *ptr = val;
                return;
        }

        ADD_STATS(pmd_update_pinned, 1);

        xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
        set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, pte_t pteval)
{
        if (xen_iomap_pte(pteval)) {
                xen_set_iomap_pte(ptep, pteval);
                goto out;
        }

        ADD_STATS(set_pte_at, 1);
//      ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
        ADD_STATS(set_pte_at_current, mm == current->mm);
        ADD_STATS(set_pte_at_kernel, mm == &init_mm);

        if (mm == current->mm || mm == &init_mm) {
                if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
                        struct multicall_space mcs;
                        mcs = xen_mc_entry(0);

                        MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
                        ADD_STATS(set_pte_at_batched, 1);
                        xen_mc_issue(PARAVIRT_LAZY_MMU);
                        goto out;
                } else
                        if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
                                goto out;
        }
        xen_set_pte(ptep, pteval);

out:    return;
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep)
{
        /* Just return the pte as-is.  We preserve the bits on commit */
        return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                 pte_t *ptep, pte_t pte)
{
        struct mmu_update u;

        xen_mc_batch();

        u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
        u.val = pte_val_ma(pte);
        xen_extend_mmu_update(&u);

        ADD_STATS(prot_commit, 1);
        ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

        xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
        if (val & _PAGE_PRESENT) {
                unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
                pteval_t flags = val & PTE_FLAGS_MASK;
                val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
        }

        return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
        if (val & _PAGE_PRESENT) {
                unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
                pteval_t flags = val & PTE_FLAGS_MASK;
                unsigned long mfn = pfn_to_mfn(pfn);

                /*
                 * If there's no mfn for the pfn, then just create an
                 * empty non-present pte.  Unfortunately this loses
                 * information about the original pfn, so
                 * pte_mfn_to_pfn is asymmetric.
                 */
                if (unlikely(mfn == INVALID_P2M_ENTRY)) {
                        mfn = 0;
                        flags = 0;
                }

                val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
        }

        return val;
}
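
/*
 * Worked example of the asymmetry noted above: if some pfn has no
 * machine frame (pfn_to_mfn() returns INVALID_P2M_ENTRY), then
 * pte_pfn_to_mfn() turns its present pte into the all-zero,
 * non-present value 0, and pte_mfn_to_pfn() applied to that 0 cannot
 * recover the original pfn.
 */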

static pteval_t iomap_pte(pteval_t val)
{
        if (val & _PAGE_PRESENT) {
                unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
                pteval_t flags = val & PTE_FLAGS_MASK;

                /* We assume the pte frame number is a MFN, so
                   just use it as-is. */
                val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
        }

        return val;
}

pteval_t xen_pte_val(pte_t pte)
{
        pteval_t pteval = pte.pte;

        /* If this is a WC pte, convert back from Xen WC to Linux WC */
        if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
                WARN_ON(!pat_enabled);
                pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
        }

        if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
                return pteval;

        return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

pgdval_t xen_pgd_val(pgd_t pgd)
{
        return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

/*
 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
 * are reserved for now, to correspond to the Intel-reserved PAT
 * types.
 *
 * We expect Linux's PAT set as follows:
 *
 * Idx  PTE flags        Linux    Xen    Default
 * 0                     WB       WB     WB
 * 1            PWT      WC       WT     WT
 * 2        PCD          UC-      UC-    UC-
 * 3        PCD PWT      UC       UC     UC
 * 4    PAT              WB       WC     WB
 * 5    PAT     PWT      WC       WP     WT
 * 6    PAT PCD          UC-      UC     UC-
 * 7    PAT PCD PWT      UC       UC     UC
 */

void xen_set_pat(u64 pat)
{
        /* We expect Linux to use a PAT setting of
         * UC UC- WC WB (ignoring the PAT flag) */
        WARN_ON(pat != 0x0007010600070106ull);
}
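
/*
 * Decoding the expected value above (a worked example, reading bytes
 * from PAT entry 0 upwards; 0x06 = WB, 0x01 = WC, 0x07 = UC-,
 * 0x00 = UC):
 *
 *      0x0007010600070106 -> 06 01 07 00 06 01 07 00
 *                            WB WC UC- UC WB WC UC- UC
 *
 * i.e. entries 0-3 are WB WC UC- UC, matching the "Linux" column in
 * the table above, with entries 4-7 mirroring entries 0-3.
 */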

pte_t xen_make_pte(pteval_t pte)
{
        phys_addr_t addr = (pte & PTE_PFN_MASK);

        /* If Linux is trying to set a WC pte, then map to the Xen WC.
         * If _PAGE_PAT is set, then it probably means it is really
         * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
         * things work out OK...
         *
         * (We should never see kernel mappings with _PAGE_PSE set,
         * but we could see hugetlbfs mappings, I think.)
         */
        if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
                if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
                        pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
        }

        /*
         * Unprivileged domains are allowed to do IOMAP mappings for
         * PCI passthrough, but not map ISA space.  The ISA
         * mappings are just dummy local mappings to keep other
         * parts of the kernel happy.
         */
        if (unlikely(pte & _PAGE_IOMAP) &&
            (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
                pte = iomap_pte(pte);
        } else {
                pte &= ~_PAGE_IOMAP;
                pte = pte_pfn_to_mfn(pte);
        }

        return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

pgd_t xen_make_pgd(pgdval_t pgd)
{
        pgd = pte_pfn_to_mfn(pgd);
        return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

pmdval_t xen_pmd_val(pmd_t pmd)
{
        return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
        struct mmu_update u;

        preempt_disable();

        xen_mc_batch();

        /* ptr may be ioremapped for 64-bit pagetable setup */
        u.ptr = arbitrary_virt_to_machine(ptr).maddr;
        u.val = pud_val_ma(val);
        xen_extend_mmu_update(&u);

        ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
        ADD_STATS(pud_update, 1);

        /* If page is not pinned, we can just update the entry
           directly */
        if (!xen_page_pinned(ptr)) {
                *ptr = val;
                return;
        }

        ADD_STATS(pud_update_pinned, 1);

        xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
        if (xen_iomap_pte(pte)) {
                xen_set_iomap_pte(ptep, pte);
                return;
        }

        ADD_STATS(pte_update, 1);
//      ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
        ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

#ifdef CONFIG_X86_PAE
        ptep->pte_high = pte.pte_high;
        smp_wmb();
        ptep->pte_low = pte.pte_low;
#else
        *ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
        if (xen_iomap_pte(pte)) {
                xen_set_iomap_pte(ptep, pte);
                return;
        }

        set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        ptep->pte_low = 0;
        smp_wmb();              /* make sure low gets written first */
        ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
        pmd = pte_pfn_to_mfn(pmd);
        return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
        return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

pud_t xen_make_pud(pudval_t pud)
{
        pud = pte_pfn_to_mfn(pud);

        return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
        pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
        unsigned offset = pgd - pgd_page;
        pgd_t *user_ptr = NULL;

        if (offset < pgd_index(USER_LIMIT)) {
                struct page *page = virt_to_page(pgd_page);
                user_ptr = (pgd_t *)page->private;
                if (user_ptr)
                        user_ptr += offset;
        }

        return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
        struct mmu_update u;

        u.ptr = virt_to_machine(ptr).maddr;
        u.val = pgd_val_ma(val);
        xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
        preempt_disable();

        xen_mc_batch();

        __xen_set_pgd_hyper(ptr, val);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
        pgd_t *user_ptr = xen_get_user_pgd(ptr);

        ADD_STATS(pgd_update, 1);

        /* If page is not pinned, we can just update the entry
           directly */
        if (!xen_page_pinned(ptr)) {
                *ptr = val;
                if (user_ptr) {
                        WARN_ON(xen_page_pinned(user_ptr));
                        *user_ptr = val;
                }
                return;
        }

        ADD_STATS(pgd_update_pinned, 1);
        ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

        /* If it's pinned, then we can at least batch the kernel and
           user updates together. */
        xen_mc_batch();

        __xen_set_pgd_hyper(ptr, val);
        if (user_ptr)
                __xen_set_pgd_hyper(user_ptr, val);

        xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif  /* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
                          int (*func)(struct mm_struct *mm, struct page *,
                                      enum pt_level),
                          unsigned long limit)
{
        int flush = 0;
        unsigned hole_low, hole_high;
        unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
        unsigned pgdidx, pudidx, pmdidx;

        /* The limit is the last byte to be touched */
        limit--;
        BUG_ON(limit >= FIXADDR_TOP);

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 0;

        /*
         * 64-bit has a great big hole in the middle of the address
         * space, which contains the Xen mappings.  On 32-bit these
         * will end up making a zero-sized hole, and so it is a no-op.
         */
        hole_low = pgd_index(USER_LIMIT);
        hole_high = pgd_index(PAGE_OFFSET);

        pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
        pudidx_limit = pud_index(limit);
#else
        pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
        pmdidx_limit = pmd_index(limit);
#else
        pmdidx_limit = 0;
#endif

        for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
                pud_t *pud;

                if (pgdidx >= hole_low && pgdidx < hole_high)
                        continue;

                if (!pgd_val(pgd[pgdidx]))
                        continue;

                pud = pud_offset(&pgd[pgdidx], 0);

                if (PTRS_PER_PUD > 1) /* not folded */
                        flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

                for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
                        pmd_t *pmd;

                        if (pgdidx == pgdidx_limit &&
                            pudidx > pudidx_limit)
                                goto out;

                        if (pud_none(pud[pudidx]))
                                continue;

                        pmd = pmd_offset(&pud[pudidx], 0);

                        if (PTRS_PER_PMD > 1) /* not folded */
                                flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

                        for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
                                struct page *pte;

                                if (pgdidx == pgdidx_limit &&
                                    pudidx == pudidx_limit &&
                                    pmdidx > pmdidx_limit)
                                        goto out;

                                if (pmd_none(pmd[pmdidx]))
                                        continue;

                                pte = pmd_page(pmd[pmdidx]);
                                flush |= (*func)(mm, pte, PT_PTE);
                        }
                }
        }

out:
        /* Do the top level last, so that the callbacks can use it as
           a cue to do final things like tlb flushes. */
        flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

        return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
                        int (*func)(struct mm_struct *mm, struct page *,
                                    enum pt_level),
                        unsigned long limit)
{
        return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
        spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
        ptl = __pte_lockptr(page);
        spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

        return ptl;
}

static void xen_pte_unlock(void *v)
{
        spinlock_t *ptl = v;
        spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
        struct mmuext_op *op;
        struct multicall_space mcs;

        mcs = __xen_mc_entry(sizeof(*op));
        op = mcs.args;
        op->cmd = level;
        op->arg1.mfn = pfn_to_mfn(pfn);
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
                        enum pt_level level)
{
        unsigned pgfl = TestSetPagePinned(page);
        int flush;

        if (pgfl)
                flush = 0;              /* already pinned */
        else if (PageHighMem(page))
                /* kmaps need flushing if we found an unpinned
                   highpage */
                flush = 1;
        else {
                void *pt = lowmem_page_address(page);
                unsigned long pfn = page_to_pfn(page);
                struct multicall_space mcs = __xen_mc_entry(0);
                spinlock_t *ptl;

                flush = 0;

                /*
                 * We need to hold the pagetable lock between the time
                 * we make the pagetable RO and when we actually pin
                 * it.  If we don't, then other users may come in and
                 * attempt to update the pagetable by writing it,
                 * which will fail because the memory is RO but not
                 * pinned, so Xen won't do the trap'n'emulate.
                 *
                 * If we're using split pte locks, we can't hold the
                 * entire pagetable's worth of locks during the
                 * traverse, because we may wrap the preempt count (8
                 * bits).  The solution is to mark RO and pin each PTE
                 * page while holding the lock.  This means the number
                 * of locks we end up holding is never more than a
                 * batch size (~32 entries, at present).
                 *
                 * If we're not using split pte locks, we needn't pin
                 * the PTE pages independently, because we're
                 * protected by the overall pagetable lock.
                 */
                ptl = NULL;
                if (level == PT_PTE)
                        ptl = xen_pte_lock(page, mm);

                MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
                                        pfn_pte(pfn, PAGE_KERNEL_RO),
                                        level == PT_PGD ? UVMF_TLB_FLUSH : 0);

                if (ptl) {
                        xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

                        /* Queue a deferred unlock for when this batch
                           is completed. */
                        xen_mc_callback(xen_pte_unlock, ptl);
                }
        }

        return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
        xen_mc_batch();

        if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
                /* re-enable interrupts for flushing */
                xen_mc_issue(0);

                kmap_flush_unused();

                xen_mc_batch();
        }

#ifdef CONFIG_X86_64
        {
                pgd_t *user_pgd = xen_get_user_pgd(pgd);

                xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

                if (user_pgd) {
                        xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
                        xen_do_pin(MMUEXT_PIN_L4_TABLE,
                                   PFN_DOWN(__pa(user_pgd)));
                }
        }
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
        /* Need to make sure unshared kernel PMD is pinnable */
        xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
                     PT_PMD);
#endif
        xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
        xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
        __xen_pgd_pin(mm, mm->pgd);
}

/*
1342 * On save, we need to pin all pagetables to make sure they get their
1343 * mfns turned into pfns. Search the list for any unpinned pgds and pin
1344 * them (unpinned pgds are not currently in use, probably because the
1345 * process is under construction or destruction).
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001346 *
1347 * Expected to be called in stop_machine() ("equivalent to taking
1348 * every spinlock in the system"), so the locking doesn't really
1349 * matter all that much.
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001350 */
1351void xen_mm_pin_all(void)
1352{
1353 unsigned long flags;
1354 struct page *page;
1355
1356 spin_lock_irqsave(&pgd_lock, flags);
1357
1358 list_for_each_entry(page, &pgd_list, lru) {
1359 if (!PagePinned(page)) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001360 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001361 SetPageSavePinned(page);
1362 }
1363 }
1364
1365 spin_unlock_irqrestore(&pgd_lock, flags);
1366}
1367
Eduardo Habkostc1f2f092008-07-08 15:06:24 -07001368/*
1369 * The init_mm pagetable is really pinned as soon as its created, but
1370 * that's before we have page structures to store the bits. So do all
1371 * the book-keeping now.
1372 */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001373static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
1374 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001375{
1376 SetPagePinned(page);
1377 return 0;
1378}
1379
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001380static void __init xen_mark_init_mm_pinned(void)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001381{
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001382 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001383}
1384
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001385static int xen_unpin_page(struct mm_struct *mm, struct page *page,
1386 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001387{
Christoph Lameterd60cd462008-04-28 02:12:51 -07001388 unsigned pgfl = TestClearPagePinned(page);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001389
1390 if (pgfl && !PageHighMem(page)) {
1391 void *pt = lowmem_page_address(page);
1392 unsigned long pfn = page_to_pfn(page);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001393 spinlock_t *ptl = NULL;
1394 struct multicall_space mcs;
1395
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -07001396 /*
1397 * Do the converse to pin_page. If we're using split
 1398	 * pte locks, we must be holding the lock while
1399 * the pte page is unpinned but still RO to prevent
1400 * concurrent updates from seeing it in this
1401 * partially-pinned state.
1402 */
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001403 if (level == PT_PTE) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001404 ptl = xen_pte_lock(page, mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001405
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -07001406 if (ptl)
1407 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001408 }
1409
1410 mcs = __xen_mc_entry(0);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001411
1412 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
1413 pfn_pte(pfn, PAGE_KERNEL),
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001414 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
1415
1416 if (ptl) {
1417 /* unlock when batch completed */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001418 xen_mc_callback(xen_pte_unlock, ptl);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001419 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001420 }
1421
1422 return 0; /* never need to flush on unpin */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001423}
1424
 1425/* Release a pagetable's pages back as normal RW */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001426static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001427{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001428 xen_mc_batch();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001429
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001430 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001431
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -07001432#ifdef CONFIG_X86_64
1433 {
1434 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1435
1436 if (user_pgd) {
Tejf63c2f22008-12-16 11:56:06 -08001437 xen_do_pin(MMUEXT_UNPIN_TABLE,
1438 PFN_DOWN(__pa(user_pgd)));
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001439 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -07001440 }
1441 }
1442#endif
1443
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -07001444#ifdef CONFIG_X86_PAE
1445 /* Need to make sure unshared kernel PMD is unpinned */
Jeremy Fitzhardinge47cb2ed2008-11-06 13:48:24 -08001446 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001447 PT_PMD);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -07001448#endif
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -07001449
Ian Campbell86bbc2c2008-11-21 10:21:33 +00001450 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001451
1452 xen_mc_issue(0);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001453}
1454
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001455static void xen_pgd_unpin(struct mm_struct *mm)
1456{
1457 __xen_pgd_unpin(mm, mm->pgd);
1458}
1459
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001460/*
1461 * On resume, undo any pinning done at save, so that the rest of the
1462 * kernel doesn't see any unexpected pinned pagetables.
1463 */
1464void xen_mm_unpin_all(void)
1465{
1466 unsigned long flags;
1467 struct page *page;
1468
1469 spin_lock_irqsave(&pgd_lock, flags);
1470
1471 list_for_each_entry(page, &pgd_list, lru) {
1472 if (PageSavePinned(page)) {
1473 BUG_ON(!PagePinned(page));
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001474 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001475 ClearPageSavePinned(page);
1476 }
1477 }
1478
1479 spin_unlock_irqrestore(&pgd_lock, flags);
1480}
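
/*
 * xen_mm_pin_all() and xen_mm_unpin_all() form a save/resume pair:
 * SavePinned marks exactly those pgds that were force-pinned at save
 * time, so resume undoes only the pinning added for the snapshot and
 * leaves ordinarily-pinned pagetables alone.
 */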
1481
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001482void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
1483{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001484 spin_lock(&next->page_table_lock);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001485 xen_pgd_pin(next);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001486 spin_unlock(&next->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001487}
1488
1489void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
1490{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001491 spin_lock(&mm->page_table_lock);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001492 xen_pgd_pin(mm);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001493 spin_unlock(&mm->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001494}
1495
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001496
1497#ifdef CONFIG_SMP
 1498/* Another cpu may still have its %cr3 pointing at the pagetable, so
1499 we need to repoint it somewhere else before we can unpin it. */
1500static void drop_other_mm_ref(void *info)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001501{
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001502 struct mm_struct *mm = info;
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001503 struct mm_struct *active_mm;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001504
Brian Gerst9eb912d2009-01-19 00:38:57 +09001505 active_mm = percpu_read(cpu_tlbstate.active_mm);
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001506
1507 if (active_mm == mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001508 leave_mm(smp_processor_id());
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001509
1510 /* If this cpu still has a stale cr3 reference, then make sure
1511 it has been flushed. */
Jeremy Fitzhardinge7fd7d832009-02-17 23:24:03 -08001512 if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001513 load_cr3(swapper_pg_dir);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001514}
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001515
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001516static void xen_drop_mm_ref(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001517{
Mike Travise4d98202008-12-16 17:34:05 -08001518 cpumask_var_t mask;
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001519 unsigned cpu;
1520
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001521 if (current->active_mm == mm) {
1522 if (current->mm == mm)
1523 load_cr3(swapper_pg_dir);
1524 else
1525 leave_mm(smp_processor_id());
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001526 }
1527
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001528 /* Get the "official" set of cpus referring to our pagetable. */
Mike Travise4d98202008-12-16 17:34:05 -08001529 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1530 for_each_online_cpu(cpu) {
Rusty Russell78f1c4d2009-09-24 09:34:51 -06001531 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
Mike Travise4d98202008-12-16 17:34:05 -08001532 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1533 continue;
1534 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1535 }
1536 return;
1537 }
Rusty Russell78f1c4d2009-09-24 09:34:51 -06001538 cpumask_copy(mask, mm_cpumask(mm));
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001539
1540 /* It's possible that a vcpu may have a stale reference to our
 1541	   cr3, because it's in lazy mode and hasn't yet flushed
 1542	   its set of pending hypercalls. In this case, we can
1543 look at its actual current cr3 value, and force it to flush
1544 if needed. */
1545 for_each_online_cpu(cpu) {
1546 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
Mike Travise4d98202008-12-16 17:34:05 -08001547 cpumask_set_cpu(cpu, mask);
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001548 }
1549
Mike Travise4d98202008-12-16 17:34:05 -08001550 if (!cpumask_empty(mask))
1551 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1552 free_cpumask_var(mask);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001553}
1554#else
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001555static void xen_drop_mm_ref(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001556{
1557 if (current->active_mm == mm)
1558 load_cr3(swapper_pg_dir);
1559}
1560#endif
1561
1562/*
 1563 * While a process runs, Xen pins its pagetables, which means the
 1564 * hypervisor forces them to be read-only and controls all updates to
 1565 * them. This means that all pagetable updates have to go via the
 1566 * hypervisor, which is moderately expensive.
 1567 *
 1568 * Since we're pulling the pagetable down, we switch to init_mm, unpin
 1569 * the old process's pagetable and mark it all read-write, which allows
 1570 * further operations on it to be simple memory accesses.
 1571 *
 1572 * The only subtle point is that another CPU may still be using the
 1573 * pagetable because of lazy tlb flushing. This means we need to
 1574 * switch all CPUs off this pagetable before we can unpin it.
1575 */
1576void xen_exit_mmap(struct mm_struct *mm)
1577{
1578 get_cpu(); /* make sure we don't move around */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001579 xen_drop_mm_ref(mm);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001580 put_cpu();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001581
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -07001582 spin_lock(&mm->page_table_lock);
Jeremy Fitzhardingedf912ea2007-09-25 11:50:00 -07001583
1584 /* pgd may not be pinned in the error exit path of execve */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001585 if (xen_page_pinned(mm->pgd))
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001586 xen_pgd_unpin(mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001587
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -07001588 spin_unlock(&mm->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001589}
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -07001590
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001591static __init void xen_pagetable_setup_start(pgd_t *base)
1592{
1593}
1594
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001595static void xen_post_allocator_init(void);
1596
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001597static __init void xen_pagetable_setup_done(pgd_t *base)
1598{
1599 xen_setup_shared_info();
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001600 xen_post_allocator_init();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001601}
1602
1603static void xen_write_cr2(unsigned long cr2)
1604{
1605 percpu_read(xen_vcpu)->arch.cr2 = cr2;
1606}
1607
1608static unsigned long xen_read_cr2(void)
1609{
1610 return percpu_read(xen_vcpu)->arch.cr2;
1611}
1612
1613unsigned long xen_read_cr2_direct(void)
1614{
1615 return percpu_read(xen_vcpu_info.arch.cr2);
1616}
1617
1618static void xen_flush_tlb(void)
1619{
1620 struct mmuext_op *op;
1621 struct multicall_space mcs;
1622
1623 preempt_disable();
1624
1625 mcs = xen_mc_entry(sizeof(*op));
1626
1627 op = mcs.args;
1628 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1629 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1630
1631 xen_mc_issue(PARAVIRT_LAZY_MMU);
1632
1633 preempt_enable();
1634}
1635
1636static void xen_flush_tlb_single(unsigned long addr)
1637{
1638 struct mmuext_op *op;
1639 struct multicall_space mcs;
1640
1641 preempt_disable();
1642
1643 mcs = xen_mc_entry(sizeof(*op));
1644 op = mcs.args;
1645 op->cmd = MMUEXT_INVLPG_LOCAL;
1646 op->arg1.linear_addr = addr & PAGE_MASK;
1647 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1648
1649 xen_mc_issue(PARAVIRT_LAZY_MMU);
1650
1651 preempt_enable();
1652}
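
/*
 * The two flush routines above share one shape, sketched here as a
 * hypothetical helper (not part of this file): queue a single mmuext
 * op and let it ride any pending lazy-MMU batch.
 *
 *	static void xen_mmuext_local(unsigned cmd, unsigned long linear)
 *	{
 *		struct mmuext_op *op;
 *		struct multicall_space mcs;
 *
 *		preempt_disable();
 *
 *		mcs = xen_mc_entry(sizeof(*op));
 *		op = mcs.args;
 *		op->cmd = cmd;
 *		op->arg1.linear_addr = linear & PAGE_MASK;
 *		MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 *
 *		xen_mc_issue(PARAVIRT_LAZY_MMU);
 *
 *		preempt_enable();
 *	}
 */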
1653
1654static void xen_flush_tlb_others(const struct cpumask *cpus,
1655 struct mm_struct *mm, unsigned long va)
1656{
1657 struct {
1658 struct mmuext_op op;
1659 DECLARE_BITMAP(mask, NR_CPUS);
1660 } *args;
1661 struct multicall_space mcs;
1662
Jeremy Fitzhardingee3f8a742009-03-04 17:36:57 -08001663 if (cpumask_empty(cpus))
1664 return; /* nothing to do */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001665
1666 mcs = xen_mc_entry(sizeof(*args));
1667 args = mcs.args;
1668 args->op.arg2.vcpumask = to_cpumask(args->mask);
1669
1670 /* Remove us, and any offline CPUS. */
1671 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1672 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001673
1674 if (va == TLB_FLUSH_ALL) {
1675 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1676 } else {
1677 args->op.cmd = MMUEXT_INVLPG_MULTI;
1678 args->op.arg1.linear_addr = va;
1679 }
1680
1681 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1682
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001683 xen_mc_issue(PARAVIRT_LAZY_MMU);
1684}
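
/*
 * The vcpumask lives in the multicall argument space immediately after
 * the op, so a single MMUEXT_*_MULTI operation carries both.  The
 * current cpu is deliberately cleared from the mask: as with the
 * native flush_tlb_others, flushing the local cpu is the caller's job.
 */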
1685
1686static unsigned long xen_read_cr3(void)
1687{
1688 return percpu_read(xen_cr3);
1689}
1690
1691static void set_current_cr3(void *v)
1692{
1693 percpu_write(xen_current_cr3, (unsigned long)v);
1694}
1695
1696static void __xen_write_cr3(bool kernel, unsigned long cr3)
1697{
1698 struct mmuext_op *op;
1699 struct multicall_space mcs;
1700 unsigned long mfn;
1701
1702 if (cr3)
1703 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1704 else
1705 mfn = 0;
1706
1707 WARN_ON(mfn == 0 && kernel);
1708
1709 mcs = __xen_mc_entry(sizeof(*op));
1710
1711 op = mcs.args;
1712 op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1713 op->arg1.mfn = mfn;
1714
1715 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1716
1717 if (kernel) {
1718 percpu_write(xen_cr3, cr3);
1719
1720 /* Update xen_current_cr3 once the batch has actually
1721 been submitted. */
1722 xen_mc_callback(set_current_cr3, (void *)cr3);
1723 }
1724}
1725
1726static void xen_write_cr3(unsigned long cr3)
1727{
1728 BUG_ON(preemptible());
1729
1730 xen_mc_batch(); /* disables interrupts */
1731
 1732	/* Update while interrupts are disabled, so it's atomic with
1733 respect to ipis */
1734 percpu_write(xen_cr3, cr3);
1735
1736 __xen_write_cr3(true, cr3);
1737
1738#ifdef CONFIG_X86_64
1739 {
1740 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1741 if (user_pgd)
1742 __xen_write_cr3(false, __pa(user_pgd));
1743 else
1744 __xen_write_cr3(false, 0);
1745 }
1746#endif
1747
1748 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1749}
1750
1751static int xen_pgd_alloc(struct mm_struct *mm)
1752{
1753 pgd_t *pgd = mm->pgd;
1754 int ret = 0;
1755
1756 BUG_ON(PagePinned(virt_to_page(pgd)));
1757
1758#ifdef CONFIG_X86_64
1759 {
1760 struct page *page = virt_to_page(pgd);
1761 pgd_t *user_pgd;
1762
1763 BUG_ON(page->private != 0);
1764
1765 ret = -ENOMEM;
1766
1767 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1768 page->private = (unsigned long)user_pgd;
1769
1770 if (user_pgd != NULL) {
1771 user_pgd[pgd_index(VSYSCALL_START)] =
1772 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1773 ret = 0;
1774 }
1775
1776 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1777 }
1778#endif
1779
1780 return ret;
1781}
1782
1783static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1784{
1785#ifdef CONFIG_X86_64
1786 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1787
1788 if (user_pgd)
1789 free_page((unsigned long)user_pgd);
1790#endif
1791}
1792
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001793static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
1794{
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001795 unsigned long pfn = pte_pfn(pte);
1796
1797#ifdef CONFIG_X86_32
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001798 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1799 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1800 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1801 pte_val_ma(pte));
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001802#endif
1803
1804 /*
1805 * If the new pfn is within the range of the newly allocated
1806 * kernel pagetable, and it isn't being mapped into an
1807 * early_ioremap fixmap slot, make sure it is RO.
1808 */
1809 if (!is_early_ioremap_ptep(ptep) &&
1810 pfn >= e820_table_start && pfn < e820_table_end)
1811 pte = pte_wrprotect(pte);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001812
1813 return pte;
1814}
1815
 1816/* Init-time set_pte used while constructing the initial pagetables;
 1817   it doesn't allow RO pagetable pages to be remapped RW */
1818static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
1819{
1820 pte = mask_rw_pte(ptep, pte);
1821
1822 xen_set_pte(ptep, pte);
1823}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001824
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001825static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1826{
1827 struct mmuext_op op;
1828 op.cmd = cmd;
1829 op.arg1.mfn = pfn_to_mfn(pfn);
1830 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1831 BUG();
1832}
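
/*
 * Unlike xen_do_pin() above, this issues the MMUEXT op as an immediate
 * synchronous hypercall instead of queueing it in a multicall batch,
 * which suits the early-boot callers below, e.g.:
 *
 *	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 */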
1833
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001834/* Early in boot, while setting up the initial pagetable, assume
1835 everything is pinned. */
1836static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1837{
1838#ifdef CONFIG_FLATMEM
1839 BUG_ON(mem_map); /* should only be used early */
1840#endif
1841 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001842 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1843}
1844
1845/* Used for pmd and pud */
1846static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1847{
1848#ifdef CONFIG_FLATMEM
1849 BUG_ON(mem_map); /* should only be used early */
1850#endif
1851 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001852}
1853
1854/* Early release_pte assumes that all pts are pinned, since there's
1855 only init_mm and anything attached to that is pinned. */
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001856static __init void xen_release_pte_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001857{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001858 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001859 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1860}
1861
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001862static __init void xen_release_pmd_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001863{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001864 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001865}
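
/*
 * Note the asymmetry with the alloc hooks above: only pte pages are
 * explicitly pinned and unpinned as L1 tables; pmd and pud pages are
 * merely switched between read-only and read-write.
 */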
1866
 1867/* This needs to make sure the new pte page is pinned iff it's being
1868 attached to a pinned pagetable. */
1869static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
1870{
1871 struct page *page = pfn_to_page(pfn);
1872
1873 if (PagePinned(virt_to_page(mm->pgd))) {
1874 SetPagePinned(page);
1875
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001876 if (!PageHighMem(page)) {
1877 make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
1878 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1879 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1880 } else {
1881 /* make sure there are no stray mappings of
1882 this page */
1883 kmap_flush_unused();
1884 }
1885 }
1886}
1887
1888static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1889{
1890 xen_alloc_ptpage(mm, pfn, PT_PTE);
1891}
1892
1893static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1894{
1895 xen_alloc_ptpage(mm, pfn, PT_PMD);
1896}
1897
 1898/* This should never be called until we're OK to use struct page */
1899static void xen_release_ptpage(unsigned long pfn, unsigned level)
1900{
1901 struct page *page = pfn_to_page(pfn);
1902
1903 if (PagePinned(page)) {
1904 if (!PageHighMem(page)) {
1905 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1906 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1907 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1908 }
1909 ClearPagePinned(page);
1910 }
1911}
1912
1913static void xen_release_pte(unsigned long pfn)
1914{
1915 xen_release_ptpage(pfn, PT_PTE);
1916}
1917
1918static void xen_release_pmd(unsigned long pfn)
1919{
1920 xen_release_ptpage(pfn, PT_PMD);
1921}
1922
1923#if PAGETABLE_LEVELS == 4
1924static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1925{
1926 xen_alloc_ptpage(mm, pfn, PT_PUD);
1927}
1928
1929static void xen_release_pud(unsigned long pfn)
1930{
1931 xen_release_ptpage(pfn, PT_PUD);
1932}
1933#endif
1934
1935void __init xen_reserve_top(void)
1936{
1937#ifdef CONFIG_X86_32
1938 unsigned long top = HYPERVISOR_VIRT_START;
1939 struct xen_platform_parameters pp;
1940
1941 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1942 top = pp.virt_start;
1943
1944 reserve_top_address(-top);
1945#endif /* CONFIG_X86_32 */
1946}
1947
1948/*
 1949 * Like __va(), but returns the address in the kernel mapping (which is
 1950 * all we have until the physical memory mapping has been set up).
1951 */
1952static void *__ka(phys_addr_t paddr)
1953{
1954#ifdef CONFIG_X86_64
1955 return (void *)(paddr + __START_KERNEL_map);
1956#else
1957 return __va(paddr);
1958#endif
1959}
1960
1961/* Convert a machine address to physical address */
1962static unsigned long m2p(phys_addr_t maddr)
1963{
1964 phys_addr_t paddr;
1965
1966 maddr &= PTE_PFN_MASK;
1967 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1968
1969 return paddr;
1970}
1971
1972/* Convert a machine address to kernel virtual */
1973static void *m2v(phys_addr_t maddr)
1974{
1975 return __ka(m2p(maddr));
1976}
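
/*
 * For example: given a pmd entry read out of a Xen-provided pagetable,
 * m2v(pmd.pmd) strips the flag bits, translates the mfn to a pfn via
 * the M2P table, and offsets the result into the kernel mapping,
 * yielding a pointer to the next-level table.  This is how the
 * Xen-built pagetables are walked in xen_setup_kernel_pagetable()
 * below.
 */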
1977
Juan Quintela4ec53872010-09-02 15:45:43 +01001978/* Set the page permissions on identity-mapped pages */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001979static void set_page_prot(void *addr, pgprot_t prot)
1980{
1981 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1982 pte_t pte = pfn_pte(pfn, prot);
1983
1984 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1985 BUG();
1986}
1987
1988static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1989{
1990 unsigned pmdidx, pteidx;
1991 unsigned ident_pte;
1992 unsigned long pfn;
1993
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001994 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1995 PAGE_SIZE);
1996
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001997 ident_pte = 0;
1998 pfn = 0;
1999 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
2000 pte_t *pte_page;
2001
2002 /* Reuse or allocate a page of ptes */
2003 if (pmd_present(pmd[pmdidx]))
2004 pte_page = m2v(pmd[pmdidx].pmd);
2005 else {
2006 /* Check for free pte pages */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07002007 if (ident_pte == LEVEL1_IDENT_ENTRIES)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002008 break;
2009
2010 pte_page = &level1_ident_pgt[ident_pte];
2011 ident_pte += PTRS_PER_PTE;
2012
2013 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
2014 }
2015
2016 /* Install mappings */
2017 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
2018 pte_t pte;
2019
2020 if (pfn > max_pfn_mapped)
2021 max_pfn_mapped = pfn;
2022
2023 if (!pte_none(pte_page[pteidx]))
2024 continue;
2025
2026 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
2027 pte_page[pteidx] = pte;
2028 }
2029 }
2030
2031 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
2032 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
2033
2034 set_page_prot(pmd, PAGE_KERNEL_RO);
2035}
2036
2037#ifdef CONFIG_X86_64
2038static void convert_pfn_mfn(void *v)
2039{
2040 pte_t *pte = v;
2041 int i;
2042
2043 /* All levels are converted the same way, so just treat them
2044 as ptes. */
2045 for (i = 0; i < PTRS_PER_PTE; i++)
2046 pte[i] = xen_make_pte(pte[i].pte);
2047}
2048
2049/*
 2050 * Set up the initial kernel pagetable.
2051 *
2052 * We can construct this by grafting the Xen provided pagetable into
2053 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
2054 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
2055 * means that only the kernel has a physical mapping to start with -
2056 * but that's enough to get __va working. We need to fill in the rest
2057 * of the physical mapping once some sort of allocator has been set
2058 * up.
2059 */
2060__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
2061 unsigned long max_pfn)
2062{
2063 pud_t *l3;
2064 pmd_t *l2;
2065
2066 /* Zap identity mapping */
2067 init_level4_pgt[0] = __pgd(0);
2068
2069 /* Pre-constructed entries are in pfn, so convert to mfn */
2070 convert_pfn_mfn(init_level4_pgt);
2071 convert_pfn_mfn(level3_ident_pgt);
2072 convert_pfn_mfn(level3_kernel_pgt);
2073
2074 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
2075 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
2076
2077 memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
2078 memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
2079
2080 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
2081 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
2082 memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
2083
2084 /* Set up identity map */
2085 xen_map_identity_early(level2_ident_pgt, max_pfn);
2086
2087 /* Make pagetable pieces RO */
2088 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
2089 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
2090 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
2091 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
2092 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
2093 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
2094
2095 /* Pin down new L4 */
2096 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
2097 PFN_DOWN(__pa_symbol(init_level4_pgt)));
2098
2099 /* Unpin Xen-provided one */
2100 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2101
2102 /* Switch over */
2103 pgd = init_level4_pgt;
2104
2105 /*
2106 * At this stage there can be no user pgd, and no page
2107 * structure to attach it to, so make sure we just set kernel
2108 * pgd.
2109 */
2110 xen_mc_batch();
2111 __xen_write_cr3(true, __pa(pgd));
2112 xen_mc_issue(PARAVIRT_LAZY_CPU);
2113
Yinghai Lua9ce6bc2010-08-25 13:39:17 -07002114 memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002115 __pa(xen_start_info->pt_base +
2116 xen_start_info->nr_pt_frames * PAGE_SIZE),
2117 "XEN PAGETABLES");
2118
2119 return pgd;
2120}
2121#else /* !CONFIG_X86_64 */
Jeremy Fitzhardingef0991802010-08-26 16:16:28 -07002122static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002123
2124__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
2125 unsigned long max_pfn)
2126{
2127 pmd_t *kernel_pmd;
2128
Ian Campbella2d771c2010-10-29 16:56:19 +01002129 level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Jeremy Fitzhardingef0991802010-08-26 16:16:28 -07002130
Jeremy Fitzhardinge93dbda72009-02-26 17:35:44 -08002131 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
2132 xen_start_info->nr_pt_frames * PAGE_SIZE +
2133 512*1024);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002134
2135 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
2136 memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
2137
2138 xen_map_identity_early(level2_kernel_pgt, max_pfn);
2139
2140 memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
2141 set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
2142 __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
2143
2144 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
2145 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2146 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2147
2148 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2149
2150 xen_write_cr3(__pa(swapper_pg_dir));
2151
2152 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
2153
Yinghai Lua9ce6bc2010-08-25 13:39:17 -07002154 memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
Jeremy Fitzhardinge33df4db2009-05-07 11:56:44 -07002155 __pa(xen_start_info->pt_base +
2156 xen_start_info->nr_pt_frames * PAGE_SIZE),
2157 "XEN PAGETABLES");
2158
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002159 return swapper_pg_dir;
2160}
2161#endif /* CONFIG_X86_64 */
2162
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002163static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2164
Masami Hiramatsu3b3809a2009-04-09 10:55:33 -07002165static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002166{
2167 pte_t pte;
2168
2169 phys >>= PAGE_SHIFT;
2170
2171 switch (idx) {
2172 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2173#ifdef CONFIG_X86_F00F_BUG
2174 case FIX_F00F_IDT:
2175#endif
2176#ifdef CONFIG_X86_32
2177 case FIX_WP_TEST:
2178 case FIX_VDSO:
2179# ifdef CONFIG_HIGHMEM
2180 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2181# endif
2182#else
2183 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
2184#endif
Jeremy Fitzhardinge3ecb1b72009-03-07 23:48:41 -08002185 case FIX_TEXT_POKE0:
2186 case FIX_TEXT_POKE1:
2187 /* All local page mappings */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002188 pte = pfn_pte(phys, prot);
2189 break;
2190
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002191#ifdef CONFIG_X86_LOCAL_APIC
2192 case FIX_APIC_BASE: /* maps dummy local APIC */
2193 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2194 break;
2195#endif
2196
2197#ifdef CONFIG_X86_IO_APIC
2198 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2199 /*
2200 * We just don't map the IO APIC - all access is via
2201 * hypercalls. Keep the address in the pte for reference.
2202 */
2203 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2204 break;
2205#endif
2206
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002207 case FIX_PARAVIRT_BOOTMAP:
2208 /* This is an MFN, but it isn't an IO mapping from the
2209 IO domain */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002210 pte = mfn_pte(phys, prot);
2211 break;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002212
2213 default:
2214 /* By default, set_fixmap is used for hardware mappings */
2215 pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
2216 break;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002217 }
2218
2219 __native_set_fixmap(idx, pte);
2220
2221#ifdef CONFIG_X86_64
2222 /* Replicate changes to map the vsyscall page into the user
2223 pagetable vsyscall mapping. */
2224 if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
2225 unsigned long vaddr = __fix_to_virt(idx);
2226 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2227 }
2228#endif
2229}
2230
Juan Quintela4ec53872010-09-02 15:45:43 +01002231__init void xen_ident_map_ISA(void)
2232{
2233 unsigned long pa;
2234
2235 /*
 2236	 * If we're dom0, then linearly map the ISA machine addresses into
2237 * the kernel's address space.
2238 */
2239 if (!xen_initial_domain())
2240 return;
2241
2242 xen_raw_printk("Xen: setup ISA identity maps\n");
2243
2244 for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
2245 pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
2246
2247 if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
2248 BUG();
2249 }
2250
2251 xen_flush_tlb();
2252}
2253
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02002254static __init void xen_post_allocator_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002255{
2256 pv_mmu_ops.set_pte = xen_set_pte;
2257 pv_mmu_ops.set_pmd = xen_set_pmd;
2258 pv_mmu_ops.set_pud = xen_set_pud;
2259#if PAGETABLE_LEVELS == 4
2260 pv_mmu_ops.set_pgd = xen_set_pgd;
2261#endif
2262
2263 /* This will work as long as patching hasn't happened yet
2264 (which it hasn't) */
2265 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2266 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2267 pv_mmu_ops.release_pte = xen_release_pte;
2268 pv_mmu_ops.release_pmd = xen_release_pmd;
2269#if PAGETABLE_LEVELS == 4
2270 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2271 pv_mmu_ops.release_pud = xen_release_pud;
2272#endif
2273
2274#ifdef CONFIG_X86_64
2275 SetPagePinned(virt_to_page(level3_user_vsyscall));
2276#endif
2277 xen_mark_init_mm_pinned();
2278}
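
/*
 * Once this has run, the boot-time hooks (xen_alloc_pte_init,
 * xen_set_pte_init, etc.) are replaced by the struct-page aware
 * variants above.  Rewriting pv_mmu_ops in place like this is only
 * safe because paravirt patching hasn't happened yet.
 */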
2279
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002280static void xen_leave_lazy_mmu(void)
2281{
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002282 preempt_disable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002283 xen_mc_flush();
2284 paravirt_leave_lazy_mmu();
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002285 preempt_enable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002286}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002287
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002288static const struct pv_mmu_ops xen_mmu_ops __initdata = {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002289 .read_cr2 = xen_read_cr2,
2290 .write_cr2 = xen_write_cr2,
2291
2292 .read_cr3 = xen_read_cr3,
2293 .write_cr3 = xen_write_cr3,
2294
2295 .flush_tlb_user = xen_flush_tlb,
2296 .flush_tlb_kernel = xen_flush_tlb,
2297 .flush_tlb_single = xen_flush_tlb_single,
2298 .flush_tlb_others = xen_flush_tlb_others,
2299
2300 .pte_update = paravirt_nop,
2301 .pte_update_defer = paravirt_nop,
2302
2303 .pgd_alloc = xen_pgd_alloc,
2304 .pgd_free = xen_pgd_free,
2305
2306 .alloc_pte = xen_alloc_pte_init,
2307 .release_pte = xen_release_pte_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002308 .alloc_pmd = xen_alloc_pmd_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002309 .release_pmd = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002310
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002311 .set_pte = xen_set_pte_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002312 .set_pte_at = xen_set_pte_at,
2313 .set_pmd = xen_set_pmd_hyper,
2314
2315 .ptep_modify_prot_start = __ptep_modify_prot_start,
2316 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2317
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002318 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2319 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002320
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002321 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2322 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002323
2324#ifdef CONFIG_X86_PAE
2325 .set_pte_atomic = xen_set_pte_atomic,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002326 .pte_clear = xen_pte_clear,
2327 .pmd_clear = xen_pmd_clear,
2328#endif /* CONFIG_X86_PAE */
2329 .set_pud = xen_set_pud_hyper,
2330
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002331 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2332 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002333
2334#if PAGETABLE_LEVELS == 4
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002335 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2336 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002337 .set_pgd = xen_set_pgd_hyper,
2338
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002339 .alloc_pud = xen_alloc_pmd_init,
2340 .release_pud = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002341#endif /* PAGETABLE_LEVELS == 4 */
2342
2343 .activate_mm = xen_activate_mm,
2344 .dup_mmap = xen_dup_mmap,
2345 .exit_mmap = xen_exit_mmap,
2346
2347 .lazy_mode = {
2348 .enter = paravirt_enter_lazy_mmu,
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002349 .leave = xen_leave_lazy_mmu,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002350 },
2351
2352 .set_fixmap = xen_set_fixmap,
2353};
2354
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002355void __init xen_init_mmu_ops(void)
2356{
2357 x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
2358 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
2359 pv_mmu_ops = xen_mmu_ops;
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -07002360
2361 vmap_lazy_unmap = false;
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002362
2363 memset(dummy_mapping, 0xff, PAGE_SIZE);
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002364}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002365
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002366/* Protected by xen_reservation_lock. */
2367#define MAX_CONTIG_ORDER 9 /* 2MB */
2368static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2369
2370#define VOID_PTE (mfn_pte(0, __pgprot(0)))
2371static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2372 unsigned long *in_frames,
2373 unsigned long *out_frames)
2374{
2375 int i;
2376 struct multicall_space mcs;
2377
2378 xen_mc_batch();
2379 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2380 mcs = __xen_mc_entry(0);
2381
2382 if (in_frames)
2383 in_frames[i] = virt_to_mfn(vaddr);
2384
2385 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2386 set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
2387
2388 if (out_frames)
2389 out_frames[i] = virt_to_pfn(vaddr);
2390 }
2391 xen_mc_issue(0);
2392}
2393
2394/*
2395 * Update the pfn-to-mfn mappings for a virtual address range, either to
2396 * point to an array of mfns, or contiguously from a single starting
2397 * mfn.
2398 */
2399static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2400 unsigned long *mfns,
2401 unsigned long first_mfn)
2402{
2403 unsigned i, limit;
2404 unsigned long mfn;
2405
2406 xen_mc_batch();
2407
2408 limit = 1u << order;
2409 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2410 struct multicall_space mcs;
2411 unsigned flags;
2412
2413 mcs = __xen_mc_entry(0);
2414 if (mfns)
2415 mfn = mfns[i];
2416 else
2417 mfn = first_mfn + i;
2418
2419 if (i < (limit - 1))
2420 flags = 0;
2421 else {
2422 if (order == 0)
2423 flags = UVMF_INVLPG | UVMF_ALL;
2424 else
2425 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2426 }
2427
2428 MULTI_update_va_mapping(mcs.mc, vaddr,
2429 mfn_pte(mfn, PAGE_KERNEL), flags);
2430
2431 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2432 }
2433
2434 xen_mc_issue(0);
2435}
2436
2437/*
2438 * Perform the hypercall to exchange a region of our pfns to point to
2439 * memory with the required contiguous alignment. Takes the pfns as
2440 * input, and populates mfns as output.
2441 *
2442 * Returns a success code indicating whether the hypervisor was able to
2443 * satisfy the request or not.
2444 */
2445static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2446 unsigned long *pfns_in,
2447 unsigned long extents_out,
2448 unsigned int order_out,
2449 unsigned long *mfns_out,
2450 unsigned int address_bits)
2451{
2452 long rc;
2453 int success;
2454
2455 struct xen_memory_exchange exchange = {
2456 .in = {
2457 .nr_extents = extents_in,
2458 .extent_order = order_in,
2459 .extent_start = pfns_in,
2460 .domid = DOMID_SELF
2461 },
2462 .out = {
2463 .nr_extents = extents_out,
2464 .extent_order = order_out,
2465 .extent_start = mfns_out,
2466 .address_bits = address_bits,
2467 .domid = DOMID_SELF
2468 }
2469 };
2470
2471 BUG_ON(extents_in << order_in != extents_out << order_out);
2472
2473 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2474 success = (exchange.nr_exchanged == extents_in);
2475
2476 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2477 BUG_ON(success && (rc != 0));
2478
2479 return success;
2480}
2481
2482int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
2483 unsigned int address_bits)
2484{
2485 unsigned long *in_frames = discontig_frames, out_frame;
2486 unsigned long flags;
2487 int success;
2488
2489 /*
2490 * Currently an auto-translated guest will not perform I/O, nor will
2491 * it require PAE page directories below 4GB. Therefore any calls to
2492 * this function are redundant and can be ignored.
2493 */
2494
2495 if (xen_feature(XENFEAT_auto_translated_physmap))
2496 return 0;
2497
2498 if (unlikely(order > MAX_CONTIG_ORDER))
2499 return -ENOMEM;
2500
2501 memset((void *) vstart, 0, PAGE_SIZE << order);
2502
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002503 spin_lock_irqsave(&xen_reservation_lock, flags);
2504
2505 /* 1. Zap current PTEs, remembering MFNs. */
2506 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2507
2508 /* 2. Get a new contiguous memory extent. */
2509 out_frame = virt_to_pfn(vstart);
2510 success = xen_exchange_memory(1UL << order, 0, in_frames,
2511 1, order, &out_frame,
2512 address_bits);
2513
2514 /* 3. Map the new extent in place of old pages. */
2515 if (success)
2516 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2517 else
2518 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2519
2520 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2521
2522 return success ? 0 : -ENOMEM;
2523}
2524EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2525
2526void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
2527{
2528 unsigned long *out_frames = discontig_frames, in_frame;
2529 unsigned long flags;
2530 int success;
2531
2532 if (xen_feature(XENFEAT_auto_translated_physmap))
2533 return;
2534
2535 if (unlikely(order > MAX_CONTIG_ORDER))
2536 return;
2537
2538 memset((void *) vstart, 0, PAGE_SIZE << order);
2539
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002540 spin_lock_irqsave(&xen_reservation_lock, flags);
2541
2542 /* 1. Find start MFN of contiguous extent. */
2543 in_frame = virt_to_mfn(vstart);
2544
2545 /* 2. Zap current PTEs. */
2546 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2547
2548 /* 3. Do the exchange for non-contiguous MFNs. */
2549 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2550 0, out_frames, 0);
2551
2552 /* 4. Map new pages in place of old pages. */
2553 if (success)
2554 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2555 else
2556 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2557
2558 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2559}
2560EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
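
/*
 * Illustrative use (a hypothetical caller, not from this file): make
 * an order-3 (8 page) buffer machine-contiguous below 4GB, e.g. for a
 * device with 32-bit DMA addressing.  Note the region is zeroed by the
 * exchange, so it must be converted before any data is placed in it:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 3);
 *
 *	if (buf && xen_create_contiguous_region(buf, 3, 32) == 0) {
 *		... use buf for DMA ...
 *		xen_destroy_contiguous_region(buf, 3);
 *	}
 *	free_pages(buf, 3);
 */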
2561
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002562#ifdef CONFIG_XEN_PVHVM
Stefano Stabellini59151002010-06-17 14:22:52 +01002563static void xen_hvm_exit_mmap(struct mm_struct *mm)
2564{
2565 struct xen_hvm_pagetable_dying a;
2566 int rc;
2567
2568 a.domid = DOMID_SELF;
2569 a.gpa = __pa(mm->pgd);
2570 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2571 WARN_ON_ONCE(rc < 0);
2572}
2573
2574static int is_pagetable_dying_supported(void)
2575{
2576 struct xen_hvm_pagetable_dying a;
2577 int rc = 0;
2578
2579 a.domid = DOMID_SELF;
2580 a.gpa = 0x00;
2581 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2582 if (rc < 0) {
2583 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2584 return 0;
2585 }
2586 return 1;
2587}
2588
2589void __init xen_hvm_init_mmu_ops(void)
2590{
2591 if (is_pagetable_dying_supported())
2592 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
2593}
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002594#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002595
Ian Campbellde1ef202009-05-21 10:09:46 +01002596#define REMAP_BATCH_SIZE 16
2597
2598struct remap_data {
2599 unsigned long mfn;
2600 pgprot_t prot;
2601 struct mmu_update *mmu_update;
2602};
2603
2604static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2605 unsigned long addr, void *data)
2606{
2607 struct remap_data *rmd = data;
2608 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
2609
2610 rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
2611 rmd->mmu_update->val = pte_val_ma(pte);
2612 rmd->mmu_update++;
2613
2614 return 0;
2615}
2616
2617int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2618 unsigned long addr,
2619 unsigned long mfn, int nr,
2620 pgprot_t prot, unsigned domid)
2621{
2622 struct remap_data rmd;
2623 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2624 int batch;
2625 unsigned long range;
2626 int err = 0;
2627
2628 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
2629
2630 vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
2631
2632 rmd.mfn = mfn;
2633 rmd.prot = prot;
2634
2635 while (nr) {
2636 batch = min(REMAP_BATCH_SIZE, nr);
2637 range = (unsigned long)batch << PAGE_SHIFT;
2638
2639 rmd.mmu_update = mmu_update;
2640 err = apply_to_page_range(vma->vm_mm, addr, range,
2641 remap_area_mfn_pte_fn, &rmd);
2642 if (err)
2643 goto out;
2644
2645 err = -EFAULT;
2646 if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
2647 goto out;
2648
2649 nr -= batch;
2650 addr += range;
2651 }
2652
2653 err = 0;
2654out:
2655
2656 flush_tlb_all();
2657
2658 return err;
2659}
2660EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
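
/*
 * Illustrative call (hypothetical, privcmd-style; mfn, nr and domid
 * are assumed caller-side variables): map nr foreign frames starting
 * at mfn from domain domid into a userspace vma:
 *
 *	err = xen_remap_domain_mfn_range(vma, vma->vm_start, mfn, nr,
 *					 vma->vm_page_prot, domid);
 */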
2661
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -07002662#ifdef CONFIG_XEN_DEBUG_FS
2663
2664static struct dentry *d_mmu_debug;
2665
2666static int __init xen_mmu_debugfs(void)
2667{
2668 struct dentry *d_xen = xen_init_debugfs();
2669
2670 if (d_xen == NULL)
2671 return -ENOMEM;
2672
2673 d_mmu_debug = debugfs_create_dir("mmu", d_xen);
2674
2675 debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);
2676
2677 debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
2678 debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
2679 &mmu_stats.pgd_update_pinned);
2680 debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
2681 &mmu_stats.pgd_update_pinned);
2682
2683 debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
2684 debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
2685 &mmu_stats.pud_update_pinned);
2686 debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
2687 &mmu_stats.pud_update_pinned);
2688
2689 debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
2690 debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
2691 &mmu_stats.pmd_update_pinned);
2692 debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
2693 &mmu_stats.pmd_update_pinned);
2694
2695 debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
2696// debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
2697// &mmu_stats.pte_update_pinned);
2698 debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
2699 &mmu_stats.pte_update_pinned);
2700
2701 debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
2702 debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
2703 &mmu_stats.mmu_update_extended);
2704 xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
2705 mmu_stats.mmu_update_histo, 20);
2706
2707 debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
2708 debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
2709 &mmu_stats.set_pte_at_batched);
2710 debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
2711 &mmu_stats.set_pte_at_current);
2712 debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
2713 &mmu_stats.set_pte_at_kernel);
2714
2715 debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
2716 debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
2717 &mmu_stats.prot_commit_batched);
2718
2719 return 0;
2720}
2721fs_initcall(xen_mmu_debugfs);
2722
2723#endif /* CONFIG_XEN_DEBUG_FS */