/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include "xen-ops.h"
#include "vdso.h"
#include "p2m.h"
#include "mmu.h"

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long	next_area_mfn;
	unsigned long	target_pfn;
	unsigned long	size;
	unsigned long	mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
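/*
 * Note: the three header fields plus REMAP_SIZE mfn slots fill exactly one
 * page (REMAP_SIZE is P2M_PER_PAGE minus the three header words), so each
 * chunk's remap information lives in the chunk's own first page.
 * xen_remap_mfn below is the head of the resulting singly linked list of
 * such pages, chained via next_area_mfn and walked by xen_remap_memory().
 */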
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
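/*
 * For example, with EXTRA_MEM_RATIO of 10 a domain started with 512 MiB of
 * base memory (131072 4 KiB pages) is allowed at most 1310720 extra pages,
 * i.e. 5 GiB of additional space, assuming the base is below MAXMEM.
 */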

static void __init xen_add_extra_mem(phys_addr_t start, phys_addr_t size)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);
}

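/*
 * Remove the given range from the extra memory regions: trim the region at
 * its start or end, or split it in two if the range falls in the middle,
 * then return the pages to memblock.
 */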
static void __init xen_del_extra_mem(phys_addr_t start, phys_addr_t size)
{
	int i;
	phys_addr_t start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start;
		size_r = xen_extra_mem[i].size;

		/* Start of region. */
		if (start_r == start) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].start += size;
			xen_extra_mem[i].size -= size;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start + size) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].size -= size;
			break;
		}
		/* Middle of region. */
		if (start > start_r && start < start_r + size_r) {
			BUG_ON(start + size > start_r + size_r);
			xen_extra_mem[i].size = start - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start + size, start_r + size_r -
					  (start + size));
			break;
		}
	}
	memblock_free(start, size);
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;
	phys_addr_t addr = PFN_PHYS(pfn);

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (addr >= xen_extra_mem[i].start &&
		    addr < xen_extra_mem[i].start + xen_extra_mem[i].size)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].size)
			continue;
		pfn_s = PFN_DOWN(xen_extra_mem[i].start);
		pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(
	const struct e820entry *list, size_t map_size,
	unsigned long *min_pfn)
{
	const struct e820entry *entry;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 regions ending at or above min_pfn */
		if (e_pfn < *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

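/*
 * Hand a single frame back to the hypervisor via XENMEM_decrease_reservation.
 * Returns the number of extents actually released, i.e. 1 on success.
 */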
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
	unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			(*released)++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping, but not for highmem. */
	if (pfn >= PFN_UP(__pa(high_memory - 1)))
		return;

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts; see 'xen_remap_memory'
 * and its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	const struct e820entry *list, size_t map_size, unsigned long start_pfn,
	unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
	unsigned long *released, unsigned long *remapped)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(list, map_size,
						      &remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
				cur_pfn + left, nr_pages, released);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
		*remapped += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}

static void __init xen_set_identity_and_remap(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages,
	unsigned long *released, unsigned long *remapped)
{
	phys_addr_t start = 0;
	unsigned long last_pfn = nr_pages;
	const struct e820entry *entry;
	unsigned long num_released = 0;
	unsigned long num_remapped = 0;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * remap the memory in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				last_pfn = xen_set_identity_and_remap_chunk(
						list, map_size, start_pfn,
						end_pfn, nr_pages, last_pfn,
						&num_released, &num_remapped);
			start = end;
		}
	}

	*released = num_released;
	*remapped = num_remapped;

	pr_info("Released %ld page(s)\n", num_released);
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn is remapped to which pfn) is contained in
 * the memory to be remapped itself, in a linked list anchored at xen_remap_mfn.
 * This scheme allows the different chunks to be remapped in arbitrary order
 * while the resulting mapping remains independent of that order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			mfn = xen_remap_buf.mfns[i];
			xen_update_mem_tables(pfn, mfn);
			remapped++;
			pfn++;
		}
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}

		mfn = xen_remap_mfn;
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;
	domid_t domid = DOMID_SELF;
	int ret;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, MAX_DOMAIN_PAGES);
}

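/*
 * Add a region to the kernel e820 map, shrinking RAM regions to whole-page
 * boundaries so that partially usable pages are not reported as RAM.
 */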
static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}

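/*
 * Turn E820_UNUSABLE entries into E820_RAM; see the comment on UNUSABLE
 * regions in xen_memory_setup() below for why this is done for dom0.
 */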
static void __init xen_ignore_unusable(struct e820entry *list, size_t map_size)
{
	struct e820entry *entry;
	unsigned int i;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		if (entry->type == E820_UNUSABLE)
			entry->type = E820_RAM;
	}
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	phys_addr_t mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	unsigned long remapped_pages;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable(map, memmap.nr_entries);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
				   &xen_released_pages, &remapped_pages);

	extra_pages += xen_released_pages;
	extra_pages += remapped_pages;

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);
	i = 0;
	while (i < memmap.nr_entries) {
		phys_addr_t addr = map[i].addr;
		phys_addr_t size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, PFN_PHYS(extra_pages));
				extra_pages -= PFN_DOWN(size);
				xen_add_extra_mem(addr, size);
				xen_max_p2m_pfn = PFN_DOWN(addr + size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 *
	 * PFNs above MAX_P2M_PFN are considered identity mapped as
	 * well.
	 */
	set_phys_range_identity(map[i-1].addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 * We tried to make the memblock_reserve more selective so
	 * that it would be clear what region is reserved. Sadly we ran
	 * into the problem wherein on a 64-bit hypervisor with a 32-bit
	 * initial domain, the pt_base has the cr3 value which is not
	 * necessarily where the pagetable starts! As Jan put it: "
	 * Actually, the adjustment turns out to be correct: The page
	 * tables for a 32-on-64 dom0 get allocated in the order "first L1",
	 * "first L2", "first L3", so the offset to the page table base is
	 * indeed 2. When reading xen/include/public/xen.h's comment
	 * very strictly, this is not a violation (since there nothing is said
	 * that the first thing in the page table space is pointed to by
	 * pt_base; I admit that this seems to be implied though, namely
	 * do I think that it is implied that the page table space is the
	 * range [pt_base, pt_base + nt_pt_frames), whereas that
	 * range here indeed is [pt_base - 2, pt_base - 2 + nt_pt_frames),
	 * which - without a priori knowledge - the kernel would have
	 * difficulty to figure out)." - so let's just fall back to the
	 * easy way and reserve the whole region.
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}

/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	struct xen_memory_map memmap;
	int i;
	int rc;

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc < 0)
		panic("No memory map (%d)\n", rc);

	sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);

	for (i = 0; i < memmap.nr_entries; i++)
		e820_add_region(map[i].addr, map[i].size, map[i].type);

	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	/*
	 * This could be called before selected_vdso32 is initialized, so
	 * just fiddle with both possible images.  vdso_image_32_syscall
	 * can't be selected, since it only exists on 64-bit systems.
	 */
	u32 *mask;
	mask = vdso_image_32_int80.data +
		vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = vdso_image_32_sysenter.data +
		vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

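/*
 * Register an entry point (event, failsafe, sysenter or syscall) with the
 * hypervisor; events are masked on entry to the callback
 * (CALLBACKF_mask_events).
 */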
static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

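/*
 * Register the sysenter callback when the CPU supports it (SEP on 32-bit,
 * SYSENTER32 on 64-bit); clear the feature bit if registration fails.
 */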
void xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

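/*
 * Register the 64-bit syscall callback (fatal for 64-bit userspace if it
 * fails) and, if the CPU supports it, the 32-bit compat syscall callback.
 */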
void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

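/*
 * PV MMU setup: enable the relevant vm_assist modes and register the event
 * and failsafe callbacks. Only called for domains that are not
 * auto-translated (see xen_arch_setup() below).
 */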
void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
	fiddle_vdso();
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}