/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "vdso.h"
#include "p2m.h"
#include "mmu.h"

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* E820 map used during setting up memory. */
static struct e820entry xen_e820_map[E820MAX] __initdata;
static u32 xen_e820_map_entries __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long	next_area_mfn;
	unsigned long	target_pfn;
	unsigned long	size;
	unsigned long	mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)

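/*
 * Record a range as extra memory: either start a new entry in xen_extra_mem[]
 * or extend the entry it directly follows.  The range is also reserved in
 * memblock.
 */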
static void __init xen_add_extra_mem(phys_addr_t start, phys_addr_t size)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);
}

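/*
 * Remove a range from the extra memory bookkeeping: trim the containing
 * entry at its start or end, or split it in two, and free the range in
 * memblock so it becomes available to the allocator.
 */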
static void __init xen_del_extra_mem(phys_addr_t start, phys_addr_t size)
{
	int i;
	phys_addr_t start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start;
		size_r = xen_extra_mem[i].size;

		/* Start of region. */
		if (start_r == start) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].start += size;
			xen_extra_mem[i].size -= size;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start + size) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].size -= size;
			break;
		}
		/* Mid of region. */
		if (start > start_r && start < start_r + size_r) {
			BUG_ON(start + size > start_r + size_r);
			xen_extra_mem[i].size = start - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start + size, start_r + size_r -
					  (start + size));
			break;
		}
	}
	memblock_free(start, size);
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;
	phys_addr_t addr = PFN_PHYS(pfn);

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (addr >= xen_extra_mem[i].start &&
		    addr < xen_extra_mem[i].start + xen_extra_mem[i].size)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].size)
			continue;
		pfn_s = PFN_DOWN(xen_extra_mem[i].start);
		pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820entry *entry = xen_e820_map;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn < *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

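/*
 * Give one machine frame back to the hypervisor.  Returns the number of
 * extents released, i.e. 1 on success.
 */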
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
			unsigned long end_pfn, unsigned long nr_pages)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping, but not for highmem. */
	if (pfn >= PFN_UP(__pa(high_memory - 1)))
		return;

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
						cur_pfn + left, nr_pages);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}

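/*
 * Identity map all non-RAM regions and gaps in the E820 map and prepare
 * remapping of the RAM backing the ranges below nr_pages.
 */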
static void __init xen_set_identity_and_remap(unsigned long nr_pages)
{
	phys_addr_t start = 0;
	unsigned long last_pfn = nr_pages;
	const struct e820entry *entry = xen_e820_map;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * remap the memory in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == xen_e820_map_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				last_pfn = xen_set_identity_and_remap_chunk(
						start_pfn, end_pfn, nr_pages,
						last_pfn);
			start = end;
		}
	}

	pr_info("Released %ld page(s)\n", xen_released_pages);
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn is remapped to which pfn) is contained in
 * the to be remapped memory itself in a linked list anchored at xen_remap_mfn.
 * This scheme allows remapping the different chunks in arbitrary order while
 * the resulting mapping will be independent of the order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			mfn = xen_remap_buf.mfns[i];
			xen_update_mem_tables(pfn, mfn);
			remapped++;
			pfn++;
		}
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}

		mfn = xen_remap_mfn;
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;
	domid_t domid = DOMID_SELF;
	int ret;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, MAX_DOMAIN_PAGES);
}

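/*
 * Add a region to the kernel e820 map, trimming RAM regions to whole pages
 * so that no partially usable page is reported as RAM.
 */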
static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}

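/*
 * Treat UNUSABLE regions in the machine memory map as RAM; see the comment
 * at the call site in xen_memory_setup() for the reasoning.
 */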
static void __init xen_ignore_unusable(void)
{
	struct e820entry *entry = xen_e820_map;
	unsigned int i;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		if (entry->type == E820_UNUSABLE)
			entry->type = E820_RAM;
	}
}

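/*
 * Count the pages below max_pfn that are covered by non-RAM E820 regions.
 * The RAM backing those pages has to be remapped elsewhere, so this is the
 * number of extra pages needed due to remapping.
 */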
static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
{
	unsigned long extra = 0;
	const struct e820entry *entry = xen_e820_map;
	int i;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		unsigned long start_pfn = PFN_DOWN(entry->addr);
		unsigned long end_pfn = PFN_UP(entry->addr + entry->size);

		if (start_pfn >= max_pfn)
			break;
		if (entry->type == E820_RAM)
			continue;
		if (end_pfn >= max_pfn)
			end_pfn = max_pfn;
		extra += end_pfn - start_pfn;
	}

	return extra;
}

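/*
 * Return false if the range [start, start + size) is fully contained within
 * a single E820 RAM region, true otherwise.
 */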
bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820entry *entry;
	unsigned mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_map;

	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++) {
		if (entry->type == E820_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;

		entry++;
	}

	return true;
}

/*
 * Find a free area in physical memory not yet reserved and compliant with
 * E820 map.
 * Used to relocate pre-allocated areas like initrd or p2m list which are in
 * conflict with the to be used E820 map.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area which is already reserved for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned mapcnt;
	phys_addr_t addr, start;
	struct e820entry *entry = xen_e820_map;

	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++, entry++) {
		if (entry->type != E820_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}

/*
 * Reserve Xen mfn_list.
 * See comment above "struct start_info" in <xen/interface/xen.h>
 * We tried to make the memblock_reserve more selective so
 * that it would be clear what region is reserved. Sadly we ran
 * into the problem wherein on a 64-bit hypervisor with a 32-bit
 * initial domain, the pt_base has the cr3 value which is not
 * necessarily where the pagetable starts! As Jan put it: "
 * Actually, the adjustment turns out to be correct: The page
 * tables for a 32-on-64 dom0 get allocated in the order "first L1",
 * "first L2", "first L3", so the offset to the page table base is
 * indeed 2. When reading xen/include/public/xen.h's comment
 * very strictly, this is not a violation (since there nothing is said
 * that the first thing in the page table space is pointed to by
 * pt_base; I admit that this seems to be implied though, namely
 * do I think that it is implied that the page table space is the
 * range [pt_base, pt_base + nr_pt_frames), whereas that
 * range here indeed is [pt_base - 2, pt_base - 2 + nr_pt_frames),
 * which - without a priori knowledge - the kernel would have
 * difficulty to figure out)." - so let's just fall back to the
 * easy way and reserve the whole region.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		memblock_reserve(__pa(xen_start_info->mfn_list),
				 xen_start_info->pt_base -
				 xen_start_info->mfn_list);
		return;
	}

	memblock_reserve(PFN_PHYS(xen_start_info->first_p2m_pfn),
			 PFN_PHYS(xen_start_info->nr_p2m_frames));
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn = xen_start_info->nr_pages;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_map[0].addr = 0ULL;
		xen_e820_map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_map[0].size += 8ULL << 20;
		xen_e820_map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_map_entries = memmap.nr_entries;

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable();

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(xen_e820_map, xen_e820_map_entries,
			  &xen_e820_map_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/* How many extra pages do we need due to remapping? */
	extra_pages += xen_count_remap_pages(max_pfn);

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);
	i = 0;
	addr = xen_e820_map[0].addr;
	size = xen_e820_map[0].size;
	while (i < xen_e820_map_entries) {
		chunk_size = size;
		type = xen_e820_map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				extra_pages -= PFN_DOWN(chunk_size);
				xen_add_extra_mem(addr, chunk_size);
				xen_max_p2m_pfn = PFN_DOWN(addr + chunk_size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_map_entries) {
				addr = xen_e820_map[i].addr;
				size = xen_e820_map[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 *
	 * PFNs above MAX_P2M_PFN are considered identity mapped as
	 * well.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	if (xen_is_e820_reserved(__pa_symbol(_text),
			__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
		BUG();
	}

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	xen_reserve_xen_mfnlist();

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_set_identity_and_remap(max_pfn);

	return "Xen";
}

/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
	struct xen_memory_map memmap;
	int i;
	int rc;

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc < 0)
		panic("No memory map (%d)\n", rc);

	xen_e820_map_entries = memmap.nr_entries;

	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
			  &xen_e820_map_entries);

	for (i = 0; i < xen_e820_map_entries; i++)
		e820_add_region(xen_e820_map[i].addr, xen_e820_map[i].size,
				xen_e820_map[i].type);

	xen_reserve_xen_mfnlist();

	return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	/*
	 * This could be called before selected_vdso32 is initialized, so
	 * just fiddle with both possible images.  vdso_image_32_syscall
	 * can't be selected, since it only exists on 64-bit systems.
	 */
	u32 *mask;
	mask = vdso_image_32_int80.data +
		vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = vdso_image_32_sysenter.data +
		vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

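/*
 * Register a callback entry point with the hypervisor; events stay masked
 * while the callback is executing (CALLBACKF_mask_events).
 */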
static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
	fiddle_vdso();
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}