/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include "xen-ops.h"
#include "vdso.h"
#include "p2m.h"
#include "mmu.h"

/* These are code, but not functions. Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
#ifdef CONFIG_X86_64
extern asmlinkage void nmi(void);
#endif
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 *
 * The "- 3" leaves room for the three bookkeeping fields (next_area_mfn,
 * target_pfn, size) that precede the mfns[] array in the page-sized buffer
 * below; P2M_PER_PAGE is the number of unsigned longs fitting in one page.
 */
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long	next_area_mfn;
	unsigned long	target_pfn;
	unsigned long	size;
	unsigned long	mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

/*
 * The maximum amount of extra memory compared to the base size. The
 * main scaling factor is the size of struct page. At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
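
/*
 * A rough worked example (assuming 4 KiB pages and a 64-byte struct
 * page): every extra page costs 64 bytes of base memory, so a 10x
 * ratio spends at most 10 * 64 / 4096, i.e. about 16%, of the base
 * allocation on page structures for the extra region.
 */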

static void __init xen_add_extra_mem(u64 start, u64 size)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);
}

static void __init xen_del_extra_mem(u64 start, u64 size)
{
	int i;
	u64 start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start;
		size_r = xen_extra_mem[i].size;

		/* Start of region. */
		if (start_r == start) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].start += size;
			xen_extra_mem[i].size -= size;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start + size) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].size -= size;
			break;
		}
		/* Middle of region. */
		if (start > start_r && start < start_r + size_r) {
			BUG_ON(start + size > start_r + size_r);
			xen_extra_mem[i].size = start - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start + size, start_r + size_r -
					  (start + size));
			break;
		}
	}
	memblock_free(start, size);
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor-supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;
	unsigned long addr = PFN_PHYS(pfn);

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (addr >= xen_extra_mem[i].start &&
		    addr < xen_extra_mem[i].start + xen_extra_mem[i].size)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		pfn_s = PFN_DOWN(xen_extra_mem[i].start);
		pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(
	const struct e820entry *list, size_t map_size,
	unsigned long *min_pfn)
{
	const struct e820entry *entry;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn < *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

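/*
 * Hand a single machine frame back to Xen. The hypercall returns the
 * number of extents actually released, so a return of 1 means success.
 */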
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
	unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

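		/* Stop on the first failed release or p2m update. */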
		if (ret == 1) {
			(*released)++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping, but not for highmem. */
	if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
		return;

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a linked list of buffer pages,
 * each holding up to REMAP_SIZE MFNs plus the start target PFN for doing the
 * remap. This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
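/*
 * A sketch of one entry in that list, mirroring struct xen_remap_buf
 * above (field order as declared):
 *
 *	+------------------+
 *	| next_area_mfn    |  mfn of the next list entry, or
 *	|                  |  INVALID_P2M_ENTRY at the tail
 *	| target_pfn       |  first pfn this chunk is remapped to
 *	| size             |  number of valid entries in mfns[]
 *	| mfns[REMAP_SIZE] |  the machine frames being remapped
 *	+------------------+
 */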
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	const struct e820entry *list, size_t map_size, unsigned long start_pfn,
	unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
	unsigned long *released, unsigned long *remapped)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(list, map_size,
						      &remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
				cur_pfn + left, nr_pages, released);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
		*remapped += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}

static void __init xen_set_identity_and_remap(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages,
	unsigned long *released, unsigned long *remapped)
{
	phys_addr_t start = 0;
	unsigned long last_pfn = nr_pages;
	const struct e820entry *entry;
	unsigned long num_released = 0;
	unsigned long num_remapped = 0;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * remap the memory in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				last_pfn = xen_set_identity_and_remap_chunk(
						list, map_size, start_pfn,
						end_pfn, nr_pages, last_pfn,
						&num_released, &num_remapped);
			start = end;
		}
	}

	*released = num_released;
	*remapped = num_remapped;

	pr_info("Released %ld page(s)\n", num_released);
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn is remapped to which pfn) is contained in
 * the memory to be remapped itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows the different chunks to be remapped in
 * arbitrary order while keeping the resulting mapping independent of that
 * order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			mfn = xen_remap_buf.mfns[i];
			xen_update_mem_tables(pfn, mfn);
			remapped++;
			pfn++;
		}
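
		/*
		 * Coalesce adjacent target ranges (a new chunk may adjoin
		 * the accumulated range from either side) so that
		 * xen_del_extra_mem() is called once per contiguous range.
		 */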
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}

		mfn = xen_remap_mfn;
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;
	domid_t domid = DOMID_SELF;
	int ret;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, MAX_DOMAIN_PAGES);
}

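/*
 * Trim partial pages off RAM regions before adding them to the e820
 * table; non-RAM regions are added with their original bounds.
 */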
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
	u64 end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((u64)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}

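/*
 * Treat UNUSABLE regions as RAM; see the comment at the call site in
 * xen_memory_setup() for why dom0 needs this.
 */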
static void xen_ignore_unusable(struct e820entry *list, size_t map_size)
{
	struct e820entry *entry;
	unsigned int i;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		if (entry->type == E820_UNUSABLE)
			entry->type = E820_RAM;
	}
}

/**
 * xen_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	unsigned long remapped_pages;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable(map, memmap.nr_entries);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
				   &xen_released_pages, &remapped_pages);

	extra_pages += xen_released_pages;
	extra_pages += remapped_pages;

	/*
	 * Clamp the amount of extra memory to EXTRA_MEM_RATIO times
	 * the base size. On non-highmem systems, the base size is the
	 * full initial memory allocation; on highmem it is limited to
	 * the max size of lowmem, so that it doesn't get completely
	 * filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);
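
	/*
	 * Walk the e820 map: RAM below mem_end is kept, RAM above it is
	 * turned into extra memory until the extra_pages budget runs
	 * out, and whatever is left is marked unusable.
	 */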
	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
				xen_max_p2m_pfn = PFN_DOWN(addr + size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 *
	 * PFNs above MAX_P2M_PFN are considered identity mapped as
	 * well.
	 */
	set_phys_range_identity(map[i-1].addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 * - mfn_list
	 * - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 * We tried to make the memblock_reserve more selective so
	 * that it would be clear what region is reserved. Sadly we ran
	 * into the problem wherein on a 64-bit hypervisor with a 32-bit
	 * initial domain, the pt_base has the cr3 value which is not
	 * necessarily where the pagetable starts! As Jan put it: "
	 * Actually, the adjustment turns out to be correct: The page
	 * tables for a 32-on-64 dom0 get allocated in the order "first L1",
	 * "first L2", "first L3", so the offset to the page table base is
	 * indeed 2. When reading xen/include/public/xen.h's comment
	 * very strictly, this is not a violation (since there nothing is said
	 * that the first thing in the page table space is pointed to by
	 * pt_base; I admit that this seems to be implied though, namely
	 * do I think that it is implied that the page table space is the
	 * range [pt_base, pt_base + nr_pt_frames), whereas that
	 * range here indeed is [pt_base - 2, pt_base - 2 + nr_pt_frames),
	 * which - without a priori knowledge - the kernel would have
	 * difficulty to figure out)." - so let's just fall back to the
	 * easy way and reserve the whole region.
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}

/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	struct xen_memory_map memmap;
	int i;
	int rc;

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc < 0)
		panic("No memory map (%d)\n", rc);

	sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);

	for (i = 0; i < memmap.nr_entries; i++)
		e820_add_region(map[i].addr, map[i].size, map[i].type);

	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	/*
	 * This could be called before selected_vdso32 is initialized, so
	 * just fiddle with both possible images. vdso_image_32_syscall
	 * can't be selected, since it only exists on 64-bit systems.
	 */
	u32 *mask;
	mask = vdso_image_32_int80.data +
		vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = vdso_image_32_sysenter.data +
		vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

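/*
 * Register an entry point with the hypervisor for the given callback
 * type; CALLBACKF_mask_events keeps event delivery masked while the
 * callback runs.
 */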
static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

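/*
 * Advertise SYSENTER only if the hypervisor accepts the callback;
 * otherwise clear the CPU feature bit so userspace won't try to use
 * the instruction.
 */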
void xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

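/*
 * PV-MMU specific setup: enable the 4GB-segments, writable-pagetable
 * and PAE-extended-cr3 VM assists, then register the event/failsafe
 * callbacks and the fast syscall entry points.
 */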
void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
	fiddle_vdso();
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}