/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 *
 * Code to convert EFI to E820 map has been implemented in the elilo bootloader
 * based on an EFI patch by Edgar Hucek. Based on the E820 map, the page table
 * is set up appropriately for EFI runtime code.
 * - mouli 06/14/2007.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>

static pgd_t *save_pgd __initdata;
static unsigned long efi_flags __initdata;

/*
 * We allocate runtime services regions bottom-up, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
 */
static u64 efi_va = -4 * (1UL << 30);
#define EFI_VA_END	(-68 * (1UL << 30))
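
/*
 * With the values above, the mapping window runs from EFI_VA_END (-68G,
 * i.e. 0xffff_ffef_0000_0000) up to -4G (0xffff_ffff_0000_0000), which is
 * exactly the advertised 64G of EFI VA space.
 */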

/*
 * Scratch space used for switching the pagetable in the EFI stub
 */
struct efi_scratch {
	u64 r15;
	u64 prev_cr3;
	pgd_t *efi_pgt;
	bool use_pgd;
};
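
/*
 * How the stub appears to use this (a sketch inferred from the field names,
 * not spelled out in this file): when use_pgd is set, the calling stub saves
 * the current CR3 value in prev_cr3, loads efi_pgt for the duration of the
 * EFI call and restores prev_cr3 afterwards; r15 is a scratch slot for the
 * register clobbered while doing so.
 */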

static void __init early_code_mapping_set_exec(int executable)
{
	efi_memory_desc_t *md;
	void *p;

	if (!(__supported_pte_mask & _PAGE_NX))
		return;

	/* Make EFI service code area executable */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
		    md->type == EFI_BOOT_SERVICES_CODE)
			efi_set_executable(md, executable);
	}
}

void __init efi_call_phys_prelog(void)
{
	unsigned long vaddress;
	int pgd;
	int n_pgds;

	if (!efi_enabled(EFI_OLD_MEMMAP))
		return;

	early_code_mapping_set_exec(1);
	local_irq_save(efi_flags);

	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);

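	/*
	 * For each PGD slot covering physical memory, remember the kernel's
	 * entry and temporarily alias the direct mapping at the low address:
	 * copying the __va() entry into slot 'pgd' makes VA == PA for that
	 * range while the firmware call runs.
	 */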
	for (pgd = 0; pgd < n_pgds; pgd++) {
		save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
	}
	__flush_tlb_all();
}

void __init efi_call_phys_epilog(void)
{
	/*
	 * Restore the kernel page-table entries saved in
	 * efi_call_phys_prelog(), then re-enable interrupts.
	 */
	int pgd;
	int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

	if (!efi_enabled(EFI_OLD_MEMMAP))
		return;

	for (pgd = 0; pgd < n_pgds; pgd++)
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
	kfree(save_pgd);
	__flush_tlb_all();
	local_irq_restore(efi_flags);
	early_code_mapping_set_exec(0);
}

/*
 * Add low kernel mappings for passing arguments to EFI functions.
 */
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_pgds;
	pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);

	if (efi_enabled(EFI_OLD_MEMMAP))
		return;

	num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);

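	/*
	 * Copy every kernel PGD entry from PAGE_OFFSET up to MODULES_END
	 * (direct mapping, vmalloc/vmemmap and kernel text/modules) into the
	 * trampoline page table, so kernel pointers handed to the firmware
	 * resolve there as well.
	 */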
	memcpy(pgd + pgd_index(PAGE_OFFSET),
	       init_mm.pgd + pgd_index(PAGE_OFFSET),
	       sizeof(pgd_t) * num_pgds);
}

void efi_setup_page_tables(void)
{
	efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;

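	/*
	 * Only the new, top-down VA mapping scheme switches page tables
	 * around EFI calls; the legacy 1:1 memmap path keeps running on the
	 * kernel's own PGD, so use_pgd stays false there.
	 */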
	if (!efi_enabled(EFI_OLD_MEMMAP))
		efi_scratch.use_pgd = true;
}

static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
	pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
	unsigned long pf = 0, size;
	u64 end;

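	/*
	 * Regions the firmware does not mark write-back are mapped with the
	 * cache-disable bit, so the CPU does not cache device-like memory.
	 */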
	if (!(md->attribute & EFI_MEMORY_WB))
		pf |= _PAGE_PCD;

	size = md->num_pages << PAGE_SHIFT;
	end = va + size;

	if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
			md->phys_addr, va);
}

void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return old_map_region(md);

	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);

	efi_va -= size;

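	/*
	 * Keep the VA congruent with the PA modulo 2M: if both share the same
	 * offset within their 2M page, large (PMD) pages can presumably be
	 * used for this region where alignment allows.
	 */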
	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;

		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	/* Do the VA map */
	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}

void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
				 u32 type, u64 attribute)
{
	unsigned long last_map_pfn;

	if (type == EFI_MEMORY_MAPPED_IO)
		return ioremap(phys_addr, size);

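	/*
	 * init_memory_mapping() may stop short of the requested end (for
	 * instance at a hole); if so, recurse to map the remainder starting
	 * at the first unmapped byte.
	 */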
	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
		unsigned long top = last_map_pfn << PAGE_SHIFT;
		efi_ioremap(top, size - (top - phys_addr), type, attribute);
	}

	if (!(attribute & EFI_MEMORY_WB))
		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);

	return (void __iomem *)__va(phys_addr);
}