/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include "xen-ops.h"
#include "vdso.h"
#include "p2m.h"

/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
#ifdef CONFIG_X86_64
extern asmlinkage void nmi(void);
#endif
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* Buffer used to remap identity mapped pages */
unsigned long xen_remap_buf[P2M_PER_PAGE] __initdata;

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
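/*
 * Rough sanity check (assuming 4 KiB pages and a struct page of about
 * 64 bytes): one page of base memory holds page structures for
 * 4096 / 64 = 64 pages of extra memory, so the theoretical ceiling is
 * around 64x before bookkeeping consumes the base entirely; 10x stays
 * comfortably below that.
 */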

static void __init xen_add_extra_mem(u64 start, u64 size)
{
	unsigned long pfn;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);

	xen_max_p2m_pfn = PFN_DOWN(start + size);
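	/*
	 * The extra region is not backed by machine memory at this
	 * point; invalidate its p2m entries so the pfns are not treated
	 * as mapped until they are populated later (e.g. by the balloon
	 * driver).
	 */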
	for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		if (WARN_ONCE(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
			continue;
		WARN_ONCE(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
			  pfn, mfn);

		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

static unsigned long __init xen_do_chunk(unsigned long start,
					 unsigned long end, bool release)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	unsigned long len = 0;
	unsigned long pfn;
	int ret;

	for (pfn = start; pfn < end; pfn++) {
		unsigned long frame;
		unsigned long mfn = pfn_to_mfn(pfn);

		if (release) {
			/* Make sure pfn exists to start with */
			if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
				continue;
			frame = mfn;
		} else {
			if (mfn != INVALID_P2M_ENTRY)
				continue;
			frame = pfn;
		}
		set_xen_guest_handle(reservation.extent_start, &frame);
		reservation.nr_extents = 1;

		ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
					   &reservation);
		WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
		     release ? "release" : "populate", pfn, ret);

		if (ret == 1) {
			if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
				if (release)
					break;
				set_xen_guest_handle(reservation.extent_start, &frame);
				reservation.nr_extents = 1;
				ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
							   &reservation);
				break;
			}
			len++;
		} else
			break;
	}
	if (len)
		printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
		       release ? "Freeing" : "Populating",
		       start, end, len,
		       release ? "freed" : "added");

	return len;
}
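/*
 * Example (hypothetical numbers): xen_do_chunk(0x20000, 0x20400, true)
 * walks pfns 0x20000-0x203ff, hands each backing frame to the
 * hypervisor via XENMEM_decrease_reservation and returns the number of
 * pages actually released.
 */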

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(
	const struct e820entry *list, size_t map_size,
	unsigned long *min_pfn)
{
	const struct e820entry *entry;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn < *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}
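/*
 * Example (hypothetical map): with one RAM entry covering pfns
 * 0x100-0x1000 and *min_pfn == 0x200, the entry straddles *min_pfn,
 * so 0xe00 pages are returned and *min_pfn is left unchanged; with
 * *min_pfn == 0x50, 0xf00 pages are returned and *min_pfn becomes
 * 0x100.
 */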

/*
 * This releases a chunk of memory and then does the identity map. It's
 * used as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
	unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
	unsigned long *released)
{
	WARN_ON(start_pfn > end_pfn);

	/* Need to release pages first */
	*released += xen_do_chunk(start_pfn, min(end_pfn, nr_pages), true);
	*identity += set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update both the p2m and m2p tables.
 */
static unsigned long __init xen_update_mem_tables(unsigned long pfn,
						  unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};
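	/*
	 * MMU_MACHPHYS_UPDATE in the low bits of .ptr asks the
	 * hypervisor to set machine_to_phys_mapping[mfn] = .val, i.e.
	 * the m2p side; the p2m side is the guest's own table and is
	 * updated below.
	 */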

	/* Update p2m */
	if (!early_set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		return false;
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		return false;
	}

	return true;
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and remaps the underlying RAM of the original
 * allocation at remap_pfn. It must do so carefully in P2M_PER_PAGE sized blocks
 * to not exhaust the reserved brk space. Doing it in properly aligned blocks
 * ensures we only allocate the minimum required leaf pages in the p2m table. It
 * copies the existing mfns from the p2m table under the 1:1 map, overwrites
 * them with the identity map and then updates the p2m and m2p tables with the
 * remapped memory.
 */
static unsigned long __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_start_pfn_align, remap_start_pfn_align;
	unsigned long ident_end_pfn_align, remap_end_pfn_align;
	unsigned long ident_boundary_pfn, remap_boundary_pfn;
	unsigned long ident_cnt = 0;
	unsigned long remap_cnt = 0;
	unsigned long left = size;
	unsigned long mod;
	int i;

	WARN_ON(size == 0);

	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

	/*
	 * Determine the proper alignment to remap memory in P2M_PER_PAGE sized
	 * blocks. We need to keep track of both the existing pfn mapping and
	 * the new pfn remapping.
	 */
	mod = start_pfn % P2M_PER_PAGE;
	ident_start_pfn_align =
		mod ? (start_pfn - mod + P2M_PER_PAGE) : start_pfn;
	mod = remap_pfn % P2M_PER_PAGE;
	remap_start_pfn_align =
		mod ? (remap_pfn - mod + P2M_PER_PAGE) : remap_pfn;
	mod = (start_pfn + size) % P2M_PER_PAGE;
	ident_end_pfn_align = start_pfn + size - mod;
	mod = (remap_pfn + size) % P2M_PER_PAGE;
	remap_end_pfn_align = remap_pfn + size - mod;
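	/*
	 * Worked example (hypothetical numbers, P2M_PER_PAGE == 512):
	 * for start_pfn == 1000 and size == 2000, mod == 488, so
	 * ident_start_pfn_align == 1024 and ident_end_pfn_align == 2560.
	 * The loop below then handles pfns 1024-2559 in three aligned
	 * 512-pfn blocks (assuming the remap side allows as many), and
	 * the 24-pfn head plus 440-pfn tail are left for the boundary
	 * code further down.
	 */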

	/* Iterate over each p2m leaf node in each range */
	for (ident_pfn_iter = ident_start_pfn_align, remap_pfn_iter = remap_start_pfn_align;
	     ident_pfn_iter < ident_end_pfn_align && remap_pfn_iter < remap_end_pfn_align;
	     ident_pfn_iter += P2M_PER_PAGE, remap_pfn_iter += P2M_PER_PAGE) {
		/* Check we aren't past the end */
		BUG_ON(ident_pfn_iter + P2M_PER_PAGE > start_pfn + size);
		BUG_ON(remap_pfn_iter + P2M_PER_PAGE > remap_pfn + size);

		/* Save p2m mappings */
		for (i = 0; i < P2M_PER_PAGE; i++)
			xen_remap_buf[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Set identity map which will free a p2m leaf */
		ident_cnt += set_phys_range_identity(ident_pfn_iter,
			ident_pfn_iter + P2M_PER_PAGE);

#ifdef DEBUG
		/* Helps verify a p2m leaf has been freed */
		for (i = 0; i < P2M_PER_PAGE; i++) {
			unsigned long pfn = ident_pfn_iter + i;
			BUG_ON(pfn_to_mfn(pfn) != pfn);
		}
#endif
		/* Now remap memory */
		for (i = 0; i < P2M_PER_PAGE; i++) {
			unsigned long mfn = xen_remap_buf[i];

			/* This will use the p2m leaf freed above */
			if (!xen_update_mem_tables(remap_pfn_iter + i, mfn)) {
				WARN(1, "Failed to update mem mapping for pfn=%ld mfn=%ld\n",
				     remap_pfn_iter + i, mfn);
				return 0;
			}

			remap_cnt++;
		}

		left -= P2M_PER_PAGE;
	}

	/* Max boundary space possible */
	BUG_ON(left > (P2M_PER_PAGE - 1) * 2);
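	/*
	 * At most P2M_PER_PAGE - 1 pages can be left over at the
	 * unaligned head of the range and another P2M_PER_PAGE - 1 at
	 * the unaligned tail, hence the 2x bound checked above.
	 */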

	/* Now handle the boundary conditions */
	ident_boundary_pfn = start_pfn;
	remap_boundary_pfn = remap_pfn;
	for (i = 0; i < left; i++) {
		unsigned long mfn;

		/* These two checks move from the start to end boundaries */
		if (ident_boundary_pfn == ident_start_pfn_align)
			ident_boundary_pfn = ident_pfn_iter;
		if (remap_boundary_pfn == remap_start_pfn_align)
			remap_boundary_pfn = remap_pfn_iter;

		/* Check we aren't past the end */
		BUG_ON(ident_boundary_pfn >= start_pfn + size);
		BUG_ON(remap_boundary_pfn >= remap_pfn + size);

		mfn = pfn_to_mfn(ident_boundary_pfn);

		if (!xen_update_mem_tables(remap_boundary_pfn, mfn)) {
			WARN(1, "Failed to update mem mapping for pfn=%ld mfn=%ld\n",
			     remap_boundary_pfn, mfn);
			return 0;
		}
		remap_cnt++;

		ident_boundary_pfn++;
		remap_boundary_pfn++;
	}

	/* Finish up the identity map */
	if (ident_start_pfn_align >= ident_end_pfn_align) {
		/*
		 * In this case we have an identity range which does not span an
		 * aligned block so everything needs to be identity mapped here.
		 * If we didn't check this we might remap too many pages since
		 * the align boundaries are not meaningful in this case.
		 */
		ident_cnt += set_phys_range_identity(start_pfn,
			start_pfn + size);
	} else {
		/* Remapped above so check each end of the chunk */
		if (start_pfn < ident_start_pfn_align)
			ident_cnt += set_phys_range_identity(start_pfn,
				ident_start_pfn_align);
		if (start_pfn + size > ident_pfn_iter)
			ident_cnt += set_phys_range_identity(ident_pfn_iter,
				start_pfn + size);
	}

	BUG_ON(ident_cnt != size);
	BUG_ON(remap_cnt != size);

	return size;
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	const struct e820entry *list, size_t map_size, unsigned long start_pfn,
	unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
	unsigned long *identity, unsigned long *remapped,
	unsigned long *released)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			*identity += set_phys_range_identity(cur_pfn,
				cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(list, map_size,
						      &remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
				cur_pfn + left, nr_pages, identity, released);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		if (!xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn)) {
			WARN(1, "Failed to remap 1:1 memory cur_pfn=%ld size=%ld remap_pfn=%ld\n",
			     cur_pfn, size, remap_pfn);
			xen_set_identity_and_release_chunk(cur_pfn,
				cur_pfn + left, nr_pages, identity, released);
			break;
		}

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
		*identity += size;
		*remapped += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}

static unsigned long __init xen_set_identity_and_remap(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages,
	unsigned long *released)
{
	phys_addr_t start = 0;
	unsigned long identity = 0;
	unsigned long remapped = 0;
	unsigned long last_pfn = nr_pages;
	const struct e820entry *entry;
	unsigned long num_released = 0;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * remap the memory in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				last_pfn = xen_set_identity_and_remap_chunk(
						list, map_size, start_pfn,
						end_pfn, nr_pages, last_pfn,
						&identity, &remapped,
						&num_released);
			start = end;
		}
	}

	*released = num_released;

	pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
	pr_info("Remapped %ld page(s), last_pfn=%ld\n", remapped, last_pfn);
	pr_info("Released %ld page(s)\n", num_released);

	return last_pfn;
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;
	domid_t domid = DOMID_SELF;
	int ret;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, MAX_DOMAIN_PAGES);
}

static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
	u64 end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((u64)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}
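/*
 * For example, a RAM entry covering [0x1234, 0x9234) is added as
 * [0x2000, 0x9000): partial pages at either end are clipped rather
 * than reported as usable RAM.
 */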

void xen_ignore_unusable(struct e820entry *list, size_t map_size)
{
	struct e820entry *entry;
	unsigned int i;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		if (entry->type == E820_UNUSABLE)
			entry->type = E820_RAM;
	}
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long last_pfn = 0;
	unsigned long extra_pages = 0;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable(map, memmap.nr_entries);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set identity map on non-RAM pages and remap the underlying RAM.
	 */
	last_pfn = xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
					      &xen_released_pages);

	extra_pages += xen_released_pages;

	if (last_pfn > max_pfn) {
		max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
		mem_end = PFN_PHYS(max_pfn);
	}
	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * multiple of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);
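	/*
	 * For instance, a domain booted with 1 GiB (max_pfn == 0x40000)
	 * can absorb at most 10 GiB of extra space here on a 64-bit
	 * build, where MAXMEM is not the limiting factor.
	 */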
	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 *
	 * PFNs above MAX_P2M_PFN are considered identity mapped as
	 * well.
	 */
	set_phys_range_identity(map[i-1].addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 * We tried to make the memblock_reserve more selective so
	 * that it would be clear what region is reserved. Sadly we ran
	 * into the problem wherein on a 64-bit hypervisor with a 32-bit
	 * initial domain, the pt_base has the cr3 value which is not
	 * necessarily where the pagetable starts! As Jan put it: "
	 * Actually, the adjustment turns out to be correct: The page
	 * tables for a 32-on-64 dom0 get allocated in the order "first L1",
	 * "first L2", "first L3", so the offset to the page table base is
	 * indeed 2. When reading xen/include/public/xen.h's comment
	 * very strictly, this is not a violation (since there nothing is said
	 * that the first thing in the page table space is pointed to by
	 * pt_base; I admit that this seems to be implied though, namely
	 * do I think that it is implied that the page table space is the
	 * range [pt_base, pt_base + nr_pt_frames), whereas that
	 * range here indeed is [pt_base - 2, pt_base - 2 + nr_pt_frames),
	 * which - without a priori knowledge - the kernel would have
	 * difficulty to figure out)." - so let's just fall back to the
	 * easy way and reserve the whole region.
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}

/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	struct xen_memory_map memmap;
	int i;
	int rc;

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc < 0)
		panic("No memory map (%d)\n", rc);

	sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);

	for (i = 0; i < memmap.nr_entries; i++)
		e820_add_region(map[i].addr, map[i].size, map[i].type);

	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	/*
	 * This could be called before selected_vdso32 is initialized, so
	 * just fiddle with both possible images.  vdso_image_32_syscall
	 * can't be selected, since it only exists on 64-bit systems.
	 */
	u32 *mask;
	mask = vdso_image_32_int80.data +
		vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = vdso_image_32_sysenter.data +
		vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
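	/* i.e. copy min(MAX_GUEST_CMDLINE, COMMAND_LINE_SIZE) bytes */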

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
	fiddle_vdso();
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}