/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "vdso.h"
#include "mmu.h"

#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* E820 map used during setting up memory. */
static struct e820entry xen_e820_map[E820MAX] __initdata;
static u32 xen_e820_map_entries __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE (P2M_PER_PAGE - 3)
static struct {
	unsigned long next_area_mfn;
	unsigned long target_pfn;
	unsigned long size;
	unsigned long mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

/*
 * The maximum amount of extra memory compared to the base size. The
 * main scaling factor is the size of struct page. At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)

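/*
 * Worked example (illustrative numbers, not taken from this file): with the
 * 10x ratio above, a domain booted with 1 GB may be offered up to 10 GB of
 * extra memory. Assuming 4 KB pages and a struct page of roughly 64 bytes,
 * the page structures for those 10 GB alone would consume about 160 MB of
 * the 1 GB base allocation, which is why the ratio cannot grow much further.
 */
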
static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

static void __init xen_parse_512gb(void)
{
	bool val = false;
	char *arg;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
	if (!arg)
		return;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
	if (!arg)
		val = true;
	else if (strtobool(arg + strlen("xen_512gb_limit="), &val))
		return;

	xen_512gb_limit = val;
}

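/*
 * Behaviour of the parser above, summarized from the code: a bare
 * "xen_512gb_limit" on the guest command line enables the limit,
 * "xen_512gb_limit=0" / "xen_512gb_limit=1" set it explicitly via
 * strtobool(), and an unparseable value leaves the compile-time default
 * (CONFIG_XEN_512GB) untouched.
 */
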
static void __init xen_add_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;

	/*
	 * No need to check for a zero size: that should happen rarely and
	 * would only write a new entry which is regarded as unused due to
	 * its zero size.
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].n_pfns == 0) {
			xen_extra_mem[i].start_pfn = start_pfn;
			xen_extra_mem[i].n_pfns = n_pfns;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
		    start_pfn) {
			xen_extra_mem[i].n_pfns += n_pfns;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

static void __init xen_del_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;
	unsigned long start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start_pfn;
		size_r = xen_extra_mem[i].n_pfns;

		/* Start of region. */
		if (start_r == start_pfn) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].start_pfn += n_pfns;
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start_pfn + n_pfns) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* Middle of region: split it in two. */
		if (start_pfn > start_r && start_pfn < start_r + size_r) {
			BUG_ON(start_pfn + n_pfns > start_r + size_r);
			xen_extra_mem[i].n_pfns = start_pfn - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
					  (start_pfn + n_pfns));
			break;
		}
	}
	memblock_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (pfn >= xen_extra_mem[i].start_pfn &&
		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].n_pfns)
			continue;
		pfn_s = xen_extra_mem[i].start_pfn;
		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820entry *entry = xen_e820_map;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 entries ending after min_pfn. */
		if (e_pfn <= *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/*
		 * If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

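/*
 * Illustrative call (hypothetical map): with a single RAM entry covering
 * pfns [0x100000, 0x200000), passing *min_pfn == 0x180000 returns 0x80000
 * and leaves *min_pfn unchanged, while *min_pfn == 0x1000 returns 0x100000
 * and advances *min_pfn to 0x100000, the start of the RAM range.
 */
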
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

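/*
 * XENMEM_decrease_reservation returns the number of extents actually
 * released, so a return value of 1 means the single page handed in above
 * was given back to the hypervisor; the caller below checks for exactly
 * that.
 */
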
/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
			unsigned long end_pfn, unsigned long nr_pages)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

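/*
 * Note that even when the release loop above stops early (a refused
 * hypercall or a failed p2m update), the whole range is still marked
 * identity mapped afterwards, which is the conservative fallback the
 * callers rely on.
 */
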
/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping, but not for highmem. */
	if (pfn >= PFN_UP(__pa(high_memory - 1)))
		return;

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

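/*
 * Resulting layout (sketch derived from the code above): the first page of
 * each processed chunk now carries a copy of xen_remap_buf, and the chunks
 * form a singly linked list threaded through next_area_mfn:
 *
 *   xen_remap_mfn --> [ next_area_mfn | target_pfn | size | mfns[] ]
 *                            |
 *                            v
 *                     previous chunk --> ... --> INVALID_P2M_ENTRY
 *
 * xen_remap_memory() later walks this chain without any extra allocation.
 */
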
/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	if (remap_pfn == 0)
		remap_pfn = nr_pages;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
						cur_pfn + left, nr_pages);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}

static unsigned long __init xen_count_remap_pages(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pages)
{
	if (start_pfn >= nr_pages)
		return remap_pages;

	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
}

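/*
 * Example (hypothetical numbers): for a non-RAM hole covering pfns
 * [0x9f, 0x100) inside an initial allocation of nr_pages = 0x40000, the
 * accumulator grows by 0x100 - 0x9f = 0x61 pages that will later have to
 * be remapped above the allocation; holes lying entirely beyond nr_pages
 * add nothing.
 */
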
static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long nr_pages, unsigned long last_val))
{
	phys_addr_t start = 0;
	unsigned long ret_val = 0;
	const struct e820entry *entry = xen_e820_map;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then call the provided function
	 * to perform its duty on the non-RAM region.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == xen_e820_map_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				ret_val = func(start_pfn, end_pfn, nr_pages,
					       ret_val);
			start = end;
		}
	}

	return ret_val;
}

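/*
 * Both users of this iterator live in xen_memory_setup(): it is called once
 * with xen_count_remap_pages() to size the extra memory pool and a second
 * time with xen_set_identity_and_remap_chunk() to actually perform the
 * identity mapping and remapping of the non-RAM areas.
 */
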
/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn is remapped to which pfn) is contained
 * in the memory to be remapped itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows remapping the different chunks in
 * arbitrary order while the resulting mapping stays independent of that
 * order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			mfn = xen_remap_buf.mfns[i];
			xen_update_mem_tables(pfn, mfn);
			remapped++;
			pfn++;
		}
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(pfn_s, len);
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}

		mfn = xen_remap_mfn;
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(pfn_s, len);

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}

static unsigned long __init xen_get_pages_limit(void)
{
	unsigned long limit;

#ifdef CONFIG_X86_32
	limit = GB(64) / PAGE_SIZE;
#else
	limit = MAXMEM / PAGE_SIZE;
	if (!xen_initial_domain() && xen_512gb_limit)
		limit = GB(512) / PAGE_SIZE;
#endif
	return limit;
}

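/*
 * Resulting limits (summarized from the function above): a 32-bit kernel is
 * capped at 64 GB, a 64-bit dom0 at whatever MAXMEM allows, and a 64-bit
 * domU with the 512 GB limit active at 512 GB, unless that limit was
 * disabled via the command-line option parsed in xen_parse_512gb().
 */
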
static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages, limit;
	domid_t domid = DOMID_SELF;
	long ret;

	limit = xen_get_pages_limit();
	max_pages = limit;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, limit);
}

static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}

static void __init xen_ignore_unusable(void)
{
	struct e820entry *entry = xen_e820_map;
	unsigned int i;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		if (entry->type == E820_UNUSABLE)
			entry->type = E820_RAM;
	}
}

bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820entry *entry;
	unsigned mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_map;

	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++) {
		if (entry->type == E820_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;

		entry++;
	}

	return true;
}

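/*
 * Note: a range counts as reserved unless it is fully contained in one
 * single RAM entry, so a range spanning two adjacent RAM entries also
 * reports true. That is deliberately conservative for the conflict checks
 * in xen_memory_setup() and xen_reserve_xen_mfnlist() below.
 */
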
/*
 * Find a free area in physical memory which is not yet reserved and is
 * compliant with the E820 map.
 * Used to relocate pre-allocated areas like the initrd or the p2m list
 * which conflict with the E820 map that is going to be used.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area, which is already reserved for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned mapcnt;
	phys_addr_t addr, start;
	struct e820entry *entry = xen_e820_map;

	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++, entry++) {
		if (entry->type != E820_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}

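/*
 * The inner scan above simply slides 'start' past every reserved page it
 * meets and keeps probing, so the first sufficiently large gap inside a
 * RAM entry is found in a single pass over that entry.
 */
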
/*
 * Like memcpy, but with physical addresses for dest and src.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}

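/*
 * Each round can map at most NR_FIX_BTMAPS pages per direction through the
 * early fixmap window, so a large copy (for example a multi-megabyte
 * initrd, as moved in xen_memory_setup() below) proceeds in several
 * map/copy/unmap iterations; the exact chunking depends on the page
 * offsets of source and destination.
 */
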
/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	if (!xen_is_e820_reserved(start, size)) {
		memblock_reserve(start, size);
		return;
	}

#ifdef CONFIG_X86_32
	/*
	 * Relocating the p2m on 32 bit systems to an arbitrary virtual
	 * address is not supported, so just give up.
	 */
	xen_raw_console_write("Xen hypervisor allocated p2m list conflicts with E820 map\n");
	BUG();
#else
	xen_relocate_p2m();
#endif
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn, pfn_s, n_pfns;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	xen_parse_512gb();
	max_pfn = xen_get_pages_limit();
	max_pfn = min(max_pfn, xen_start_info->nr_pages);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_map[0].addr = 0ULL;
		xen_e820_map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_map[0].size += 8ULL << 20;
		xen_e820_map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_map_entries = memmap.nr_entries;

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable();

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
			  &xen_e820_map_entries);

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);

	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Clamp the amount of extra memory to EXTRA_MEM_RATIO times the
	 * base size. On non-highmem systems, the base size is the full
	 * initial memory allocation; on highmem it is limited to the max
	 * size of lowmem, so that it doesn't get completely filled.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			   extra_pages, max_pages - max_pfn);
	i = 0;
	addr = xen_e820_map[0].addr;
	size = xen_e820_map[0].size;
	while (i < xen_e820_map_entries) {
		bool discard = false;

		chunk_size = size;
		type = xen_e820_map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				pfn_s = PFN_UP(addr);
				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
				extra_pages -= n_pfns;
				xen_add_extra_mem(pfn_s, n_pfns);
				xen_max_p2m_pfn = pfn_s + n_pfns;
			} else
				discard = true;
		}

		if (!discard)
			xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_map_entries) {
				addr = xen_e820_map[i].addr;
				size = xen_e820_map[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	if (xen_is_e820_reserved(__pa_symbol(_text),
				 __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
		BUG();
	}

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);

	pr_info("Released %ld page(s)\n", xen_released_pages);

	return "Xen";
}

/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
	struct xen_memory_map memmap;
	int i;
	int rc;

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc < 0)
		panic("No memory map (%d)\n", rc);

	xen_e820_map_entries = memmap.nr_entries;

	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
			  &xen_e820_map_entries);

	for (i = 0; i < xen_e820_map_entries; i++)
		e820_add_region(xen_e820_map[i].addr, xen_e820_map[i].size,
				xen_e820_map[i].type);

	/* Remove p2m info, it is not needed. */
	xen_start_info->mfn_list = 0;
	xen_start_info->first_p2m_pfn = 0;
	xen_start_info->nr_p2m_frames = 0;

	return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	u32 *mask = vdso_image_32.data +
		vdso_image_32.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
	fiddle_vdso();
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}