// SPDX-License-Identifier: GPL-2.0
/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 *
 * Code to convert EFI to E820 map has been implemented in the elilo
 * bootloader, based on an EFI patch by Edgar Hucek. Based on the E820 map,
 * the page table is set up appropriately for EFI runtime code.
 * - mouli 06/14/2007.
 *
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>
#include <linux/mem_encrypt.h>
#include <linux/sched/task.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820/api.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>

/*
 * We allocate runtime services regions top-down, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
 */
static u64 efi_va = EFI_VA_START;

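/*
 * Scratch state shared with the EFI call stubs: the mm that was live
 * before efi_switch_mm() and, for mixed mode, a stack below 4GB (see
 * struct efi_scratch in asm/efi.h).
 */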
struct efi_scratch efi_scratch;

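/*
 * Set or clear NX on the EFI boot/runtime services code regions, for the
 * old-style (efi=old_map) physical calling convention.
 */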
static void __init early_code_mapping_set_exec(int executable)
{
	efi_memory_desc_t *md;

	if (!(__supported_pte_mask & _PAGE_NX))
		return;

	/* Make EFI service code area executable */
	for_each_efi_memory_desc(md) {
		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
		    md->type == EFI_BOOT_SERVICES_CODE)
			efi_set_executable(md, executable);
	}
}

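/*
 * Prepare the page tables for a call into the firmware. In the default
 * scheme this simply switches to efi_mm; with efi=old_map it builds a
 * transient 1:1 identity mapping of all physical memory and returns the
 * saved PGD entries for efi_call_phys_epilog() to restore.
 */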
pgd_t * __init efi_call_phys_prolog(void)
{
	unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
	pgd_t *save_pgd, *pgd_k, *pgd_efi;
	p4d_t *p4d, *p4d_k, *p4d_efi;
	pud_t *pud;

	int pgd;
	int n_pgds, i, j;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		efi_switch_mm(&efi_mm);
		return NULL;
	}

	early_code_mapping_set_exec(1);

	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);

	/*
	 * Build a 1:1 identity mapping for efi=old_map usage. Note that
	 * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
	 * it is only PUD_SIZE aligned with KASLR enabled. So for a given
	 * physical address X, pud_index(X) != pud_index(__va(X)), and we
	 * can only copy the PUD entry of __va(X) into the PUD entry of X
	 * to build the 1:1 mapping. This means we can only reuse the PMD
	 * tables of the direct mapping here.
	 */
	for (pgd = 0; pgd < n_pgds; pgd++) {
		addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
		vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
		pgd_efi = pgd_offset_k(addr_pgd);
		save_pgd[pgd] = *pgd_efi;

		p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
		if (!p4d) {
			pr_err("Failed to allocate p4d table!\n");
			goto out;
		}

		for (i = 0; i < PTRS_PER_P4D; i++) {
			addr_p4d = addr_pgd + i * P4D_SIZE;
			p4d_efi = p4d + p4d_index(addr_p4d);

			pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
			if (!pud) {
				pr_err("Failed to allocate pud table!\n");
				goto out;
			}

			for (j = 0; j < PTRS_PER_PUD; j++) {
				addr_pud = addr_p4d + j * PUD_SIZE;

				if (addr_pud > (max_pfn << PAGE_SHIFT))
					break;

				vaddr = (unsigned long)__va(addr_pud);

				pgd_k = pgd_offset_k(vaddr);
				p4d_k = p4d_offset(pgd_k, vaddr);
				pud[j] = *pud_offset(p4d_k, vaddr);
			}
		}
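		/*
		 * The identity range lives at user addresses, where the
		 * PGD entry may carry _PAGE_NX (e.g. with page table
		 * isolation enabled); clear it so the firmware can
		 * execute from the 1:1 mapping.
		 */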
		pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
	}

out:
	__flush_tlb_all();

	return save_pgd;
}

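/*
 * Undo efi_call_phys_prolog(): switch back to the previous mm or, for
 * efi=old_map, restore the saved PGD entries and free the transient
 * p4d/pud tables that backed the 1:1 mapping.
 */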
void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	int pgd_idx, i;
	int nr_pgds;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		efi_switch_mm(efi_scratch.prev_mm);
		return;
	}

	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
		pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);

		if (!(pgd_val(*pgd) & _PAGE_PRESENT))
			continue;

		for (i = 0; i < PTRS_PER_P4D; i++) {
			p4d = p4d_offset(pgd,
					 pgd_idx * PGDIR_SIZE + i * P4D_SIZE);

			if (!(p4d_val(*p4d) & _PAGE_PRESENT))
				continue;

			pud = (pud_t *)p4d_page_vaddr(*p4d);
			pud_free(&init_mm, pud);
		}

		p4d = (p4d_t *)pgd_page_vaddr(*pgd);
		p4d_free(&init_mm, p4d);
	}

	kfree(save_pgd);

	__flush_tlb_all();
	early_code_mapping_set_exec(0);
}

EXPORT_SYMBOL_GPL(efi_mm);

/*
 * We need our own copy of the higher levels of the page tables
 * because we want to avoid inserting EFI region mappings (EFI_VA_END
 * to EFI_VA_START) into the standard kernel page tables. Everything
 * else can be shared, see efi_sync_low_kernel_mappings().
 *
 * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the
 * allocation.
 */
int __init efi_alloc_page_tables(void)
{
	pgd_t *pgd, *efi_pgd;
	p4d_t *p4d;
	pud_t *pud;
	gfp_t gfp_mask;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return 0;

	gfp_mask = GFP_KERNEL | __GFP_ZERO;
	efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
	if (!efi_pgd)
		return -ENOMEM;

	pgd = efi_pgd + pgd_index(EFI_VA_END);
	p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
	if (!p4d) {
		free_page((unsigned long)efi_pgd);
		return -ENOMEM;
	}

	pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
	if (!pud) {
		if (pgtable_l5_enabled)
			free_page((unsigned long) pgd_page_vaddr(*pgd));
		free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
		return -ENOMEM;
	}

	efi_mm.pgd = efi_pgd;
	mm_init_cpumask(&efi_mm);
	init_new_context(NULL, &efi_mm);

	return 0;
}

/*
 * Add low kernel mappings for passing arguments to EFI functions.
 */
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_entries;
	pgd_t *pgd_k, *pgd_efi;
	p4d_t *p4d_k, *p4d_efi;
	pud_t *pud_k, *pud_efi;
	pgd_t *efi_pgd = efi_mm.pgd;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return;

	/*
	 * We can share all PGD entries apart from the one entry that
	 * covers the EFI runtime mapping space.
	 *
	 * Make sure the EFI runtime region mappings are guaranteed to
	 * only span a single PGD entry and that the entry also maps
	 * other important kernel regions.
	 */
	MAYBE_BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
	MAYBE_BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
			(EFI_VA_END & PGDIR_MASK));

	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
	pgd_k = pgd_offset_k(PAGE_OFFSET);

	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);

	/*
	 * As with PGDs, we share all P4D entries apart from the one entry
	 * that covers the EFI runtime mapping space.
	 */
	BUILD_BUG_ON(p4d_index(EFI_VA_END) != p4d_index(MODULES_END));
	BUILD_BUG_ON((EFI_VA_START & P4D_MASK) != (EFI_VA_END & P4D_MASK));

	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
	pgd_k = pgd_offset_k(EFI_VA_END);
	p4d_efi = p4d_offset(pgd_efi, 0);
	p4d_k = p4d_offset(pgd_k, 0);

	num_entries = p4d_index(EFI_VA_END);
	memcpy(p4d_efi, p4d_k, sizeof(p4d_t) * num_entries);

	/*
	 * We share all the PUD entries apart from those that map the
	 * EFI regions. Copy around them.
	 */
	BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
	BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);

	p4d_efi = p4d_offset(pgd_efi, EFI_VA_END);
	p4d_k = p4d_offset(pgd_k, EFI_VA_END);
	pud_efi = pud_offset(p4d_efi, 0);
	pud_k = pud_offset(p4d_k, 0);

	num_entries = pud_index(EFI_VA_END);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);

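	/*
	 * Copy the PUD entries that sit above the EFI region, i.e. from
	 * EFI_VA_START up to the end of the table, leaving the EFI slots
	 * themselves untouched.
	 */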
	pud_efi = pud_offset(p4d_efi, EFI_VA_START);
	pud_k = pud_offset(p4d_k, EFI_VA_START);

	num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}

/*
 * Wrapper for slow_virt_to_phys() that handles NULL addresses.
 */
static inline phys_addr_t
virt_to_phys_or_null_size(void *va, unsigned long size)
{
	bool bad_size;

	if (!va)
		return 0;

	if (virt_addr_valid(va))
		return virt_to_phys(va);

	/*
	 * A fully aligned variable on the stack is guaranteed not to
	 * cross a page boundary. Try to catch strings on the stack by
	 * checking that 'size' is a power of two.
	 */
	bad_size = size > PAGE_SIZE || !is_power_of_2(size);

	WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);

	return slow_virt_to_phys(va);
}

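/*
 * Shorthand for the common case where the object's own size is the right
 * alignment and size hint.
 */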
#define virt_to_phys_or_null(addr) \
	virt_to_phys_or_null_size((addr), sizeof(*(addr)))

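/*
 * Called before SetVirtualAddressMap(): ident-map the new memory map, the
 * zero page and, for mixed mode, the kernel text plus a stack below 4GB
 * into the EFI page tables.
 */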
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
	unsigned long pfn, text, pf;
	struct page *page;
	unsigned npages;
	pgd_t *pgd = efi_mm.pgd;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return 0;

	/*
	 * It can happen that the physical address of new_memmap lands in memory
	 * which is not mapped in the EFI page table. Therefore we need to go
	 * and ident-map those pages containing the map before calling
	 * phys_efi_set_virtual_address_map().
	 */
	pfn = pa_memmap >> PAGE_SHIFT;
	pf = _PAGE_NX | _PAGE_RW | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
		return 1;
	}

	/*
	 * Certain firmware versions are way too sentimental and still believe
	 * they are exclusive and unquestionable owners of the first physical
	 * page, even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
	 * (but then write-access it later during SetVirtualAddressMap()).
	 *
	 * Create a 1:1 mapping for this page, to avoid triple faults during
	 * early boot with such firmware. We are free to hand this page to the
	 * BIOS, as trim_bios_range() will reserve the first page and isolate
	 * it away from memory allocators anyway.
	 */
	pf = _PAGE_RW;
	if (sev_active())
		pf |= _PAGE_ENC;

	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
		pr_err("Failed to create 1:1 mapping for the first page!\n");
		return 1;
	}

	/*
	 * When making calls to the firmware everything needs to be 1:1
	 * mapped and addressable with 32-bit pointers. Map the kernel
	 * text and allocate a new stack because we can't rely on the
	 * stack pointer being < 4GB.
	 */
	if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native())
		return 0;

	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!page)
		panic("Unable to allocate EFI runtime stack < 4GB\n");

	efi_scratch.phys_stack = virt_to_phys(page_address(page));
	efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */

	npages = (_etext - _text) >> PAGE_SHIFT;
	text = __pa(_text);
	pfn = text >> PAGE_SHIFT;

	pf = _PAGE_RW | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) {
		pr_err("Failed to map kernel text 1:1\n");
		return 1;
	}

	return 0;
}

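/*
 * Map one EFI memory descriptor at @va in the EFI page tables, honouring
 * the descriptor's caching attribute and SEV memory encryption.
 */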
static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
	unsigned long flags = _PAGE_RW;
	unsigned long pfn;
	pgd_t *pgd = efi_mm.pgd;

	if (!(md->attribute & EFI_MEMORY_WB))
		flags |= _PAGE_PCD;

	if (sev_active())
		flags |= _PAGE_ENC;

	pfn = md->phys_addr >> PAGE_SHIFT;
	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
			md->phys_addr, va);
}

void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return old_map_region(md);

	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);

	/*
	 * Enforce the 1:1 mapping as the default virtual address when
	 * booting in EFI mixed mode, because even though we may be
	 * running a 64-bit kernel, the firmware may only be 32-bit.
	 */
	if (!efi_is_native() && IS_ENABLED(CONFIG_EFI_MIXED)) {
		md->virt_addr = md->phys_addr;
		return;
	}

	efi_va -= size;

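	/*
	 * Preserve the physical address's offset within its 2MB frame so
	 * the mapping can use large pages; if the adjusted VA moved
	 * upwards, step back one PMD to keep the allocation top-down.
	 */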
	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;

		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	/* Do the VA map */
	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}

/*
 * The kexec kernel uses efi_map_region_fixed() to map EFI runtime memory
 * ranges. md->virt_addr is the original virtual address that was already
 * mapped in the first kernel.
 */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
	__map_region(md, md->phys_addr);
	__map_region(md, md->virt_addr);
}

void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
				 u32 type, u64 attribute)
{
	unsigned long last_map_pfn;

	if (type == EFI_MEMORY_MAPPED_IO)
		return ioremap(phys_addr, size);

	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
		unsigned long top = last_map_pfn << PAGE_SHIFT;
		efi_ioremap(top, size - (top - phys_addr), type, attribute);
	}

	if (!(attribute & EFI_MEMORY_WB))
		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);

	return (void __iomem *)__va(phys_addr);
}

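/*
 * Record the physical address of the EFI setup_data payload passed in
 * (e.g. by a kexec'd kernel); it is consumed later during EFI init.
 */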
void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
	efi_setup = phys_addr + sizeof(struct setup_data);
}

static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf)
{
	unsigned long pfn;
	pgd_t *pgd = efi_mm.pgd;
	int err1, err2;

	/* Update the 1:1 mapping */
	pfn = md->phys_addr >> PAGE_SHIFT;
	err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
	if (err1) {
		pr_err("Error while updating 1:1 mapping PA 0x%llx -> VA 0x%llx!\n",
		       md->phys_addr, md->virt_addr);
	}

	err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
	if (err2) {
		pr_err("Error while updating VA mapping PA 0x%llx -> VA 0x%llx!\n",
		       md->phys_addr, md->virt_addr);
	}

	return err1 || err2;
}

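/*
 * Translate the EFI_MEMORY_* attributes of a Memory Attribute Table entry
 * into page protection flags and apply them to the region's mappings.
 */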
static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md)
{
	unsigned long pf = 0;

	if (md->attribute & EFI_MEMORY_XP)
		pf |= _PAGE_NX;

	if (!(md->attribute & EFI_MEMORY_RO))
		pf |= _PAGE_RW;

	if (sev_active())
		pf |= _PAGE_ENC;

	return efi_update_mappings(md, pf);
}

void __init efi_runtime_update_mappings(void)
{
	efi_memory_desc_t *md;

	if (efi_enabled(EFI_OLD_MEMMAP)) {
		if (__supported_pte_mask & _PAGE_NX)
			runtime_code_page_mkexec();
		return;
	}

	/*
	 * Use the EFI Memory Attribute Table for mapping permissions if it
	 * exists, since it is intended to supersede EFI_PROPERTIES_TABLE.
	 */
	if (efi_enabled(EFI_MEM_ATTR)) {
		efi_memattr_apply_permissions(NULL, efi_update_mem_attr);
		return;
	}

	/*
	 * EFI_MEMORY_ATTRIBUTES_TABLE is intended to replace
	 * EFI_PROPERTIES_TABLE. So, use EFI_PROPERTIES_TABLE to update
	 * permissions only if EFI_MEMORY_ATTRIBUTES_TABLE is not
	 * published by the firmware. Even if we find a buggy implementation
	 * of EFI_MEMORY_ATTRIBUTES_TABLE, don't fall back to
	 * EFI_PROPERTIES_TABLE, for the same reason.
	 */

	if (!efi_enabled(EFI_NX_PE_DATA))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long pf = 0;

		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;

		if (!(md->attribute & EFI_MEMORY_WB))
			pf |= _PAGE_PCD;

		if ((md->attribute & EFI_MEMORY_XP) ||
		    (md->type == EFI_RUNTIME_SERVICES_DATA))
			pf |= _PAGE_NX;

		if (!(md->attribute & EFI_MEMORY_RO) &&
		    (md->type != EFI_RUNTIME_SERVICES_CODE))
			pf |= _PAGE_RW;

		if (sev_active())
			pf |= _PAGE_ENC;

		efi_update_mappings(md, pf);
	}
}

void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
	if (efi_enabled(EFI_OLD_MEMMAP))
		ptdump_walk_pgd_level(NULL, swapper_pg_dir);
	else
		ptdump_walk_pgd_level(NULL, efi_mm.pgd);
#endif
}

/*
 * Makes the calling thread switch to/from efi_mm context. Can be used
 * for SetVirtualAddressMap(), i.e. current->active_mm == init_mm, as well
 * as during EFI runtime calls, i.e. current->active_mm == current->mm.
 * We are not mm_dropping()/mm_grabbing() any mm, because we are not
 * losing/creating any references.
 */
void efi_switch_mm(struct mm_struct *mm)
{
	task_lock(current);
	efi_scratch.prev_mm = current->active_mm;
	current->active_mm = mm;
	switch_mm(efi_scratch.prev_mm, mm, NULL);
	task_unlock(current);
}

#ifdef CONFIG_EFI_MIXED
extern efi_status_t efi64_thunk(u32, ...);

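/*
 * Look up the 32-bit address of runtime service @func in the 32-bit
 * system table, for handing to efi64_thunk().
 */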
#define runtime_service32(func)						 \
({									 \
	u32 table = (u32)(unsigned long)efi.systab;			 \
	u32 *rt, *___f;							 \
									 \
	rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime));	 \
	___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func)); \
	*___f;								 \
})

/*
 * Switch to the EFI page tables early so that we can access the 1:1
 * runtime services mappings which are not mapped in any other page
 * tables. This function must be called before runtime_service32().
 *
 * Also, disable interrupts because the IDT points to 64-bit handlers,
 * which aren't going to function correctly when we switch to 32-bit.
 */
#define efi_thunk(f, ...)						\
({									\
	efi_status_t __s;						\
	unsigned long __flags;						\
	u32 __func;							\
									\
	local_irq_save(__flags);					\
	arch_efi_call_virt_setup();					\
									\
	__func = runtime_service32(f);					\
	__s = efi64_thunk(__func, __VA_ARGS__);				\
									\
	arch_efi_call_virt_teardown();					\
	local_irq_restore(__flags);					\
									\
	__s;								\
})

efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;
	u32 func;

	efi_sync_low_kernel_mappings();
	local_irq_save(flags);

	efi_switch_mm(&efi_mm);

	func = (u32)(unsigned long)phys_set_virtual_address_map;
	status = efi64_thunk(func, memory_map_size, descriptor_size,
			     descriptor_version, virtual_map);

	efi_switch_mm(efi_scratch.prev_mm);
	local_irq_restore(flags);

	return status;
}

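/*
 * The EFI time services read and write the CMOS RTC, so take rtc_lock to
 * serialize against other users of the RTC.
 */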
static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	efi_status_t status;
	u32 phys_tm, phys_tc;

	spin_lock(&rtc_lock);

	phys_tm = virt_to_phys_or_null(tm);
	phys_tc = virt_to_phys_or_null(tc);

	status = efi_thunk(get_time, phys_tm, phys_tc);

	spin_unlock(&rtc_lock);

	return status;
}

static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_tm;

	spin_lock(&rtc_lock);

	phys_tm = virt_to_phys_or_null(tm);

	status = efi_thunk(set_time, phys_tm);

	spin_unlock(&rtc_lock);

	return status;
}

static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
			  efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_enabled, phys_pending, phys_tm;

	spin_lock(&rtc_lock);

	phys_enabled = virt_to_phys_or_null(enabled);
	phys_pending = virt_to_phys_or_null(pending);
	phys_tm = virt_to_phys_or_null(tm);

	status = efi_thunk(get_wakeup_time, phys_enabled,
			   phys_pending, phys_tm);

	spin_unlock(&rtc_lock);

	return status;
}

static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_tm;

	spin_lock(&rtc_lock);

	phys_tm = virt_to_phys_or_null(tm);

	status = efi_thunk(set_wakeup_time, enabled, phys_tm);

	spin_unlock(&rtc_lock);

	return status;
}

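/*
 * Size in bytes of a UCS-2 variable name, used as the size hint when
 * translating the name's address for the 32-bit firmware.
 */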
static unsigned long efi_name_size(efi_char16_t *name)
{
	return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
}

static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 *attr, unsigned long *data_size, void *data)
{
	efi_status_t status;
	u32 phys_name, phys_vendor, phys_attr;
	u32 phys_data_size, phys_data;

	phys_data_size = virt_to_phys_or_null(data_size);
	phys_vendor = virt_to_phys_or_null(vendor);
	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_attr = virt_to_phys_or_null(attr);
	phys_data = virt_to_phys_or_null_size(data, *data_size);

	status = efi_thunk(get_variable, phys_name, phys_vendor,
			   phys_attr, phys_data_size, phys_data);

	return status;
}

static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 attr, unsigned long data_size, void *data)
{
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;

	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vendor);
	phys_data = virt_to_phys_or_null_size(data, data_size);

	/* If data_size is > sizeof(u32) we've got problems */
	status = efi_thunk(set_variable, phys_name, phys_vendor,
			   attr, data_size, phys_data);

	return status;
}

static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
			    efi_char16_t *name,
			    efi_guid_t *vendor)
{
	efi_status_t status;
	u32 phys_name_size, phys_name, phys_vendor;

	phys_name_size = virt_to_phys_or_null(name_size);
	phys_vendor = virt_to_phys_or_null(vendor);
	phys_name = virt_to_phys_or_null_size(name, *name_size);

	status = efi_thunk(get_next_variable, phys_name_size,
			   phys_name, phys_vendor);

	return status;
}

static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
	efi_status_t status;
	u32 phys_count;

	phys_count = virt_to_phys_or_null(count);
	status = efi_thunk(get_next_high_mono_count, phys_count);

	return status;
}

static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
		       unsigned long data_size, efi_char16_t *data)
{
	u32 phys_data;

	phys_data = virt_to_phys_or_null_size(data, data_size);

	efi_thunk(reset_system, reset_type, status, data_size, phys_data);
}

static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
			 unsigned long count, unsigned long sg_list)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
			      u64 *remaining_space,
			      u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	return status;
}

static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
			     unsigned long count, u64 *max_size,
			     int *reset_type)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}

void efi_thunk_runtime_setup(void)
{
	efi.get_time = efi_thunk_get_time;
	efi.set_time = efi_thunk_set_time;
	efi.get_wakeup_time = efi_thunk_get_wakeup_time;
	efi.set_wakeup_time = efi_thunk_set_wakeup_time;
	efi.get_variable = efi_thunk_get_variable;
	efi.get_next_variable = efi_thunk_get_next_variable;
	efi.set_variable = efi_thunk_set_variable;
	efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
	efi.reset_system = efi_thunk_reset_system;
	efi.query_variable_info = efi_thunk_query_variable_info;
	efi.update_capsule = efi_thunk_update_capsule;
	efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}
#endif /* CONFIG_EFI_MIXED */