/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 *
 * Code to convert EFI to E820 map has been implemented in elilo bootloader
 * based on a EFI patch by Edgar Hucek. Based on the E820 map, the page table
 * is setup appropriately for EFI runtime code.
 * - mouli 06/14/2007.
 *
 */
17
Matt Fleming26d7f652015-10-25 10:26:35 +000018#define pr_fmt(fmt) "efi: " fmt
19
Huang, Ying5b836832008-01-30 13:31:19 +010020#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/mm.h>
23#include <linux/types.h>
24#include <linux/spinlock.h>
25#include <linux/bootmem.h>
26#include <linux/ioport.h>
27#include <linux/module.h>
28#include <linux/efi.h>
29#include <linux/uaccess.h>
30#include <linux/io.h>
31#include <linux/reboot.h>
David Howells0d01ff22013-04-11 23:51:01 +010032#include <linux/slab.h>
Huang, Ying5b836832008-01-30 13:31:19 +010033
34#include <asm/setup.h>
35#include <asm/page.h>
36#include <asm/e820.h>
37#include <asm/pgtable.h>
38#include <asm/tlbflush.h>
Huang, Ying5b836832008-01-30 13:31:19 +010039#include <asm/proto.h>
40#include <asm/efi.h>
Huang, Ying4de0d4a2008-02-13 17:22:41 +080041#include <asm/cacheflush.h>
Brian Gerst3819cd42009-01-23 11:03:29 +090042#include <asm/fixmap.h>
Borislav Petkovd2f7cbe2013-10-31 17:25:08 +010043#include <asm/realmode.h>
Matt Fleming4f9dbcf2014-01-10 18:48:30 +000044#include <asm/time.h>
Huang, Ying5b836832008-01-30 13:31:19 +010045
/*
 * We allocate runtime services regions bottom-up, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
 */
static u64 efi_va = EFI_VA_START;

/*
 * Scratch space used for switching the pagetable in the EFI stub.
 *
 * NOTE(review): the __packed layout suggests this struct is accessed
 * by fixed offsets from the assembly call stub -- confirm against the
 * stub before reordering or resizing any field.
 */
struct efi_scratch {
	u64 r15;	/* saved %r15 across the firmware call */
	u64 prev_cr3;	/* CR3 to restore after the call */
	pgd_t *efi_pgt;	/* page table to switch to when use_pgd is set */
	bool use_pgd;	/* whether the stub should switch page tables */
	u64 phys_stack;	/* <4G stack top for mixed-mode calls */
} __packed;
Borislav Petkovd2f7cbe2013-10-31 17:25:08 +010062
Matthew Garrett9cd2b072011-05-05 15:19:43 -040063static void __init early_code_mapping_set_exec(int executable)
Huang, Ying5b836832008-01-30 13:31:19 +010064{
65 efi_memory_desc_t *md;
66 void *p;
67
Huang, Yinga2172e22008-01-30 13:33:55 +010068 if (!(__supported_pte_mask & _PAGE_NX))
69 return;
70
Matthew Garrett916f6762011-05-25 09:53:13 -040071 /* Make EFI service code area executable */
Huang, Ying5b836832008-01-30 13:31:19 +010072 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
73 md = p;
Matthew Garrett916f6762011-05-25 09:53:13 -040074 if (md->type == EFI_RUNTIME_SERVICES_CODE ||
75 md->type == EFI_BOOT_SERVICES_CODE)
Matthew Garrett9cd2b072011-05-05 15:19:43 -040076 efi_set_executable(md, executable);
Huang, Ying5b836832008-01-30 13:31:19 +010077 }
78}
79
Ingo Molnar744937b2015-03-03 07:48:50 +010080pgd_t * __init efi_call_phys_prolog(void)
Huang, Ying5b836832008-01-30 13:31:19 +010081{
82 unsigned long vaddress;
Ingo Molnar744937b2015-03-03 07:48:50 +010083 pgd_t *save_pgd;
84
Nathan Zimmerb8f2c212013-01-08 09:02:43 -060085 int pgd;
86 int n_pgds;
Huang, Ying5b836832008-01-30 13:31:19 +010087
Borislav Petkovd2f7cbe2013-10-31 17:25:08 +010088 if (!efi_enabled(EFI_OLD_MEMMAP))
Ingo Molnar744937b2015-03-03 07:48:50 +010089 return NULL;
Borislav Petkovd2f7cbe2013-10-31 17:25:08 +010090
Matthew Garrett9cd2b072011-05-05 15:19:43 -040091 early_code_mapping_set_exec(1);
Nathan Zimmerb8f2c212013-01-08 09:02:43 -060092
93 n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
94 save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
95
96 for (pgd = 0; pgd < n_pgds; pgd++) {
97 save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
98 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
99 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
100 }
Huang, Ying5b836832008-01-30 13:31:19 +0100101 __flush_tlb_all();
Ingo Molnar744937b2015-03-03 07:48:50 +0100102
103 return save_pgd;
Huang, Ying5b836832008-01-30 13:31:19 +0100104}
105
Ingo Molnar744937b2015-03-03 07:48:50 +0100106void __init efi_call_phys_epilog(pgd_t *save_pgd)
Huang, Ying5b836832008-01-30 13:31:19 +0100107{
108 /*
109 * After the lock is released, the original page table is restored.
110 */
Ingo Molnar744937b2015-03-03 07:48:50 +0100111 int pgd_idx;
112 int nr_pgds;
Borislav Petkovd2f7cbe2013-10-31 17:25:08 +0100113
Ingo Molnar744937b2015-03-03 07:48:50 +0100114 if (!save_pgd)
Borislav Petkovd2f7cbe2013-10-31 17:25:08 +0100115 return;
116
Ingo Molnar744937b2015-03-03 07:48:50 +0100117 nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
118
119 for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++)
120 set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
121
Nathan Zimmerb8f2c212013-01-08 09:02:43 -0600122 kfree(save_pgd);
Ingo Molnar744937b2015-03-03 07:48:50 +0100123
Huang, Ying5b836832008-01-30 13:31:19 +0100124 __flush_tlb_all();
Matthew Garrett9cd2b072011-05-05 15:19:43 -0400125 early_code_mapping_set_exec(0);
Huang, Ying5b836832008-01-30 13:31:19 +0100126}
Keith Packarde1ad7832011-12-11 16:12:42 -0800127
Borislav Petkovd2f7cbe2013-10-31 17:25:08 +0100128/*
129 * Add low kernel mappings for passing arguments to EFI functions.
130 */
131void efi_sync_low_kernel_mappings(void)
132{
133 unsigned num_pgds;
134 pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
135
136 if (efi_enabled(EFI_OLD_MEMMAP))
137 return;
138
139 num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);
140
141 memcpy(pgd + pgd_index(PAGE_OFFSET),
142 init_mm.pgd + pgd_index(PAGE_OFFSET),
143 sizeof(pgd_t) * num_pgds);
144}
145
/*
 * Set up the page tables needed for calling EFI runtime services:
 * ident-map the new memory map, and in mixed mode additionally map the
 * kernel text 1:1 and allocate a below-4G firmware stack.
 *
 * Returns 0 on success, 1 on mapping failure.
 */
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
	unsigned long text;
	struct page *page;
	unsigned npages;
	pgd_t *pgd;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return 0;

	/* EFI calls reuse the real-mode trampoline page table. */
	efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
	pgd = __va(efi_scratch.efi_pgt);

	/*
	 * It can happen that the physical address of new_memmap lands in memory
	 * which is not mapped in the EFI page table. Therefore we need to go
	 * and ident-map those pages containing the map before calling
	 * phys_efi_set_virtual_address_map().
	 */
	if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
		return 1;
	}

	efi_scratch.use_pgd = true;

	/*
	 * When making calls to the firmware everything needs to be 1:1
	 * mapped and addressable with 32-bit pointers. Map the kernel
	 * text and allocate a new stack because we can't rely on the
	 * stack pointer being < 4GB.
	 */
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return 0;

	/* __GFP_DMA32 guarantees the stack page sits below 4G. */
	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!page)
		panic("Unable to allocate EFI runtime stack < 4GB\n");

	efi_scratch.phys_stack = virt_to_phys(page_address(page));
	efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */

	npages = (_end - _text) >> PAGE_SHIFT;
	text = __pa(_text);

	if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) {
		pr_err("Failed to map kernel text 1:1\n");
		return 1;
	}

	return 0;
}
198
Mathias Krause4e78eb052014-09-07 19:42:17 +0200199void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
Borislav Petkovb7b898a2014-01-18 12:48:17 +0100200{
201 pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
202
203 kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages);
Borislav Petkovd2f7cbe2013-10-31 17:25:08 +0100204}
205
206static void __init __map_region(efi_memory_desc_t *md, u64 va)
207{
208 pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
Dave Young2da6e572013-12-20 18:02:13 +0800209 unsigned long pf = 0;
Borislav Petkovd2f7cbe2013-10-31 17:25:08 +0100210
211 if (!(md->attribute & EFI_MEMORY_WB))
212 pf |= _PAGE_PCD;
213
Borislav Petkovd2f7cbe2013-10-31 17:25:08 +0100214 if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
215 pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
216 md->phys_addr, va);
217}
218
/*
 * Map one EFI region for runtime use: always establish the 1:1 mapping,
 * then (native 64-bit only) allocate a virtual address for it by carving
 * space downwards from EFI_VA_START, preserving the region's offset
 * within a 2M page so large-page mappings remain possible.
 */
void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return old_map_region(md);

	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);

	/*
	 * Enforce the 1:1 mapping as the default virtual address when
	 * booting in EFI mixed mode, because even though we may be
	 * running a 64-bit kernel, the firmware may only be 32-bit.
	 */
	if (!efi_is_native() && IS_ENABLED(CONFIG_EFI_MIXED)) {
		md->virt_addr = md->phys_addr;
		return;
	}

	/* Carve the next VA slot downwards from the previous one. */
	efi_va -= size;

	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;

		/* Aligning up would overlap the previous slot; step down 2M. */
		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	/* Running below EFI_VA_END means the 64G window is exhausted. */
	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	/* Do the VA map */
	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}
269
Dave Young3b266492013-12-20 18:02:14 +0800270/*
271 * kexec kernel will use efi_map_region_fixed to map efi runtime memory ranges.
272 * md->virt_addr is the original virtual address which had been mapped in kexec
273 * 1st kernel.
274 */
275void __init efi_map_region_fixed(efi_memory_desc_t *md)
276{
277 __map_region(md, md->virt_addr);
278}
279
/*
 * Map an EFI region for kernel access. MMIO regions get ioremap()'d;
 * everything else is added to the direct mapping via
 * init_memory_mapping() and returned as a __va() pointer.
 */
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
				 u32 type, u64 attribute)
{
	unsigned long last_map_pfn;

	if (type == EFI_MEMORY_MAPPED_IO)
		return ioremap(phys_addr, size);

	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
		/*
		 * init_memory_mapping() stopped short of the requested end;
		 * recurse to map the remaining tail of the region.
		 */
		unsigned long top = last_map_pfn << PAGE_SHIFT;
		efi_ioremap(top, size - (top - phys_addr), type, attribute);
	}

	/* Non write-back regions must be accessed uncached. */
	if (!(attribute & EFI_MEMORY_WB))
		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);

	return (void __iomem *)__va(phys_addr);
}
Dave Young1fec0532013-12-20 18:02:19 +0800299
300void __init parse_efi_setup(u64 phys_addr, u32 data_len)
301{
302 efi_setup = phys_addr + sizeof(struct setup_data);
Dave Young1fec0532013-12-20 18:02:19 +0800303}
Borislav Petkovc55d0162014-02-14 08:24:24 +0100304
305void __init efi_runtime_mkexec(void)
306{
307 if (!efi_enabled(EFI_OLD_MEMMAP))
308 return;
309
310 if (__supported_pte_mask & _PAGE_NX)
311 runtime_code_page_mkexec();
312}
Borislav Petkov11cc8512014-01-18 12:48:15 +0100313
314void __init efi_dump_pagetable(void)
315{
316#ifdef CONFIG_EFI_PGT_DUMP
317 pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
318
319 ptdump_walk_pgd_level(NULL, pgd);
320#endif
321}
Matt Fleming994448f2014-03-05 18:15:37 +0000322
#ifdef CONFIG_EFI_MIXED
/* Assembly thunk that performs the actual 64->32 bit firmware call. */
extern efi_status_t efi64_thunk(u32, ...);

/*
 * Look up the 32-bit function pointer for runtime service 'func' in the
 * 32-bit EFI system table. Only meaningful while the firmware's 1:1
 * mappings are accessible (see efi_thunk() below).
 */
#define runtime_service32(func)						 \
({									 \
	u32 table = (u32)(unsigned long)efi.systab;			 \
	u32 *rt, *___f;							 \
									 \
	rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime));	 \
	___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func)); \
	*___f;								 \
})
335
/*
 * Switch to the EFI page tables early so that we can access the 1:1
 * runtime services mappings which are not mapped in any other page
 * tables. This function must be called before runtime_service32().
 *
 * Also, disable interrupts because the IDT points to 64-bit handlers,
 * which aren't going to function correctly when we switch to 32-bit.
 *
 * NOTE(review): the CR3 write / TLB flush ordering here is deliberate;
 * do not reorder the statements.
 */
#define efi_thunk(f, ...)						\
({									\
	efi_status_t __s;						\
	unsigned long flags;						\
	u32 func;							\
									\
	efi_sync_low_kernel_mappings();					\
	local_irq_save(flags);						\
									\
	efi_scratch.prev_cr3 = read_cr3();				\
	write_cr3((unsigned long)efi_scratch.efi_pgt);			\
	__flush_tlb_all();						\
									\
	func = runtime_service32(f);					\
	__s = efi64_thunk(func, __VA_ARGS__);				\
									\
	write_cr3(efi_scratch.prev_cr3);				\
	__flush_tlb_all();						\
	local_irq_restore(flags);					\
									\
	__s;								\
})
366
/*
 * Call the firmware's 32-bit SetVirtualAddressMap() via the thunk.
 * Open-coded rather than using efi_thunk() because the function pointer
 * is passed in as a physical address instead of being looked up in the
 * runtime services table.
 */
efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;
	u32 func;

	efi_sync_low_kernel_mappings();
	/* 32-bit firmware cannot run with the 64-bit IDT active. */
	local_irq_save(flags);

	/* Switch to the EFI page table for the duration of the call. */
	efi_scratch.prev_cr3 = read_cr3();
	write_cr3((unsigned long)efi_scratch.efi_pgt);
	__flush_tlb_all();

	func = (u32)(unsigned long)phys_set_virtual_address_map;
	status = efi64_thunk(func, memory_map_size, descriptor_size,
			     descriptor_version, virtual_map);

	write_cr3(efi_scratch.prev_cr3);
	__flush_tlb_all();
	local_irq_restore(flags);

	return status;
}
395
396static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
397{
398 efi_status_t status;
399 u32 phys_tm, phys_tc;
400
401 spin_lock(&rtc_lock);
402
403 phys_tm = virt_to_phys(tm);
404 phys_tc = virt_to_phys(tc);
405
406 status = efi_thunk(get_time, phys_tm, phys_tc);
407
408 spin_unlock(&rtc_lock);
409
410 return status;
411}
412
413static efi_status_t efi_thunk_set_time(efi_time_t *tm)
414{
415 efi_status_t status;
416 u32 phys_tm;
417
418 spin_lock(&rtc_lock);
419
420 phys_tm = virt_to_phys(tm);
421
422 status = efi_thunk(set_time, phys_tm);
423
424 spin_unlock(&rtc_lock);
425
426 return status;
427}
428
429static efi_status_t
430efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
431 efi_time_t *tm)
432{
433 efi_status_t status;
434 u32 phys_enabled, phys_pending, phys_tm;
435
436 spin_lock(&rtc_lock);
437
438 phys_enabled = virt_to_phys(enabled);
439 phys_pending = virt_to_phys(pending);
440 phys_tm = virt_to_phys(tm);
441
442 status = efi_thunk(get_wakeup_time, phys_enabled,
443 phys_pending, phys_tm);
444
445 spin_unlock(&rtc_lock);
446
447 return status;
448}
449
450static efi_status_t
451efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
452{
453 efi_status_t status;
454 u32 phys_tm;
455
456 spin_lock(&rtc_lock);
457
458 phys_tm = virt_to_phys(tm);
459
460 status = efi_thunk(set_wakeup_time, enabled, phys_tm);
461
462 spin_unlock(&rtc_lock);
463
464 return status;
465}
466
467
468static efi_status_t
469efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
470 u32 *attr, unsigned long *data_size, void *data)
471{
472 efi_status_t status;
473 u32 phys_name, phys_vendor, phys_attr;
474 u32 phys_data_size, phys_data;
475
476 phys_data_size = virt_to_phys(data_size);
477 phys_vendor = virt_to_phys(vendor);
478 phys_name = virt_to_phys(name);
479 phys_attr = virt_to_phys(attr);
480 phys_data = virt_to_phys(data);
481
482 status = efi_thunk(get_variable, phys_name, phys_vendor,
483 phys_attr, phys_data_size, phys_data);
484
485 return status;
486}
487
488static efi_status_t
489efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
490 u32 attr, unsigned long data_size, void *data)
491{
492 u32 phys_name, phys_vendor, phys_data;
493 efi_status_t status;
494
495 phys_name = virt_to_phys(name);
496 phys_vendor = virt_to_phys(vendor);
497 phys_data = virt_to_phys(data);
498
499 /* If data_size is > sizeof(u32) we've got problems */
500 status = efi_thunk(set_variable, phys_name, phys_vendor,
501 attr, data_size, phys_data);
502
503 return status;
504}
505
506static efi_status_t
507efi_thunk_get_next_variable(unsigned long *name_size,
508 efi_char16_t *name,
509 efi_guid_t *vendor)
510{
511 efi_status_t status;
512 u32 phys_name_size, phys_name, phys_vendor;
513
514 phys_name_size = virt_to_phys(name_size);
515 phys_vendor = virt_to_phys(vendor);
516 phys_name = virt_to_phys(name);
517
518 status = efi_thunk(get_next_variable, phys_name_size,
519 phys_name, phys_vendor);
520
521 return status;
522}
523
524static efi_status_t
525efi_thunk_get_next_high_mono_count(u32 *count)
526{
527 efi_status_t status;
528 u32 phys_count;
529
530 phys_count = virt_to_phys(count);
531 status = efi_thunk(get_next_high_mono_count, phys_count);
532
533 return status;
534}
535
536static void
537efi_thunk_reset_system(int reset_type, efi_status_t status,
538 unsigned long data_size, efi_char16_t *data)
539{
540 u32 phys_data;
541
542 phys_data = virt_to_phys(data);
543
544 efi_thunk(reset_system, reset_type, status, data_size, phys_data);
545}
546
/* Mixed-mode UpdateCapsule() is unimplemented -- see comment below. */
static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
			 unsigned long count, unsigned long sg_list)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}
558
559static efi_status_t
560efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
561 u64 *remaining_space,
562 u64 *max_variable_size)
563{
564 efi_status_t status;
565 u32 phys_storage, phys_remaining, phys_max;
566
567 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
568 return EFI_UNSUPPORTED;
569
570 phys_storage = virt_to_phys(storage_space);
571 phys_remaining = virt_to_phys(remaining_space);
572 phys_max = virt_to_phys(max_variable_size);
573
Matt Fleming9a110402014-03-16 17:46:46 +0000574 status = efi_thunk(query_variable_info, attr, phys_storage,
Matt Fleming4f9dbcf2014-01-10 18:48:30 +0000575 phys_remaining, phys_max);
576
577 return status;
578}
579
/* Mixed-mode QueryCapsuleCapabilities() is unimplemented -- see below. */
static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
			     unsigned long count, u64 *max_size,
			     int *reset_type)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}
592
593void efi_thunk_runtime_setup(void)
594{
595 efi.get_time = efi_thunk_get_time;
596 efi.set_time = efi_thunk_set_time;
597 efi.get_wakeup_time = efi_thunk_get_wakeup_time;
598 efi.set_wakeup_time = efi_thunk_set_wakeup_time;
599 efi.get_variable = efi_thunk_get_variable;
600 efi.get_next_variable = efi_thunk_get_next_variable;
601 efi.set_variable = efi_thunk_set_variable;
602 efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
603 efi.reset_system = efi_thunk_reset_system;
604 efi.query_variable_info = efi_thunk_query_variable_info;
605 efi.update_capsule = efi_thunk_update_capsule;
606 efi.query_capsule_caps = efi_thunk_query_capsule_caps;
607}
608#endif /* CONFIG_EFI_MIXED */