// SPDX-License-Identifier: GPL-2.0-only
/*
 * efi.c - EFI subsystem
 *
 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
 *
 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
 * determine that the system supports EFI.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
#include <linux/security.h>

#include <asm/early_ioremap.h>

struct efi __read_mostly efi = {
        .runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
        .acpi = EFI_INVALID_TABLE_ADDR,
        .acpi20 = EFI_INVALID_TABLE_ADDR,
        .smbios = EFI_INVALID_TABLE_ADDR,
        .smbios3 = EFI_INVALID_TABLE_ADDR,
        .esrt = EFI_INVALID_TABLE_ADDR,
        .tpm_log = EFI_INVALID_TABLE_ADDR,
        .tpm_final_log = EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
        .mokvar_table = EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_EFI_COCO_SECRET
        .coco_secret = EFI_INVALID_TABLE_ADDR,
#endif
};
EXPORT_SYMBOL(efi);

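/*
 * Physical addresses of Linux-specific configuration tables, set by
 * efi_config_parse_tables() if the firmware provides them.
 */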
unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;

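/* mm_struct holding the mappings used when calling EFI runtime services */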
struct mm_struct efi_mm = {
        .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
        .mm_users = ATOMIC_INIT(2),
        .mm_count = ATOMIC_INIT(1),
        .write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq),
        MMAP_LOCK_INITIALIZER(efi_mm)
        .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
        .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
        .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
};

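/* ordered workqueue used to serialize all EFI runtime service calls */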
struct workqueue_struct *efi_rts_wq;

static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
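/* "noefi" on the kernel command line disables EFI runtime services */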
static int __init setup_noefi(char *arg)
{
        disable_runtime = true;
        return 0;
}
early_param("noefi", setup_noefi);

bool efi_runtime_disabled(void)
{
        return disable_runtime;
}

bool __pure __efi_soft_reserve_enabled(void)
{
        return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}

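/*
 * Handle the "efi=" command line parameter: the recognised options are
 * "debug", "noruntime", "runtime" and "nosoftreserve".
 */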
static int __init parse_efi_cmdline(char *str)
{
        if (!str) {
                pr_warn("need at least one option\n");
                return -EINVAL;
        }

        if (parse_option_str(str, "debug"))
                set_bit(EFI_DBG, &efi.flags);

        if (parse_option_str(str, "noruntime"))
                disable_runtime = true;

        if (parse_option_str(str, "runtime"))
                disable_runtime = false;

        if (parse_option_str(str, "nosoftreserve"))
                set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);

        return 0;
}
early_param("efi", parse_efi_cmdline);

struct kobject *efi_kobj;

/*
 * The systab information snuck into the efivars driver historically;
 * keep exposing it here.
 * Note: do not add more fields to the systab sysfs file, as that breaks
 * the sysfs one-value-per-file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        char *str = buf;

        if (!kobj || !buf)
                return -EINVAL;

        if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
        if (efi.acpi != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
        /*
         * If both SMBIOS and SMBIOS3 entry points are implemented, the
         * SMBIOS3 entry point shall be preferred, so we list it first to
         * let applications stop parsing after the first match.
         */
        if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
        if (efi.smbios != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

        if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
                str = efi_systab_show_arch(str);

        return str - buf;
}

static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);

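/* report whether the firmware exposes a 32-bit or a 64-bit interface */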
static ssize_t fw_platform_size_show(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
        __ATTR_RO(fw_platform_size);

static struct attribute *efi_subsys_attrs[] = {
        &efi_attr_systab.attr,
        &efi_attr_fw_platform_size.attr,
        &efi_attr_fw_vendor.attr,
        &efi_attr_runtime.attr,
        &efi_attr_config_table.attr,
        NULL,
};

umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
                                   int n)
{
        return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
        .attrs = efi_subsys_attrs,
        .is_visible = efi_attr_is_visible,
};

static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

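/*
 * Wire the generic efivars operations up to the firmware's runtime variable
 * services and register them under the efi kobject.
 */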
static int generic_ops_register(void)
{
        generic_ops.get_variable = efi.get_variable;
        generic_ops.get_next_variable = efi.get_next_variable;
        generic_ops.query_variable_store = efi_query_variable_store;

        if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
                generic_ops.set_variable = efi.set_variable;
                generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
        }
        return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
}

static void generic_ops_unregister(void)
{
        efivars_unregister(&generic_efivars);
}

#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX 16UL
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
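/*
 * "efivar_ssdt=<name>" names an EFI variable holding an ACPI SSDT overlay
 * that is loaded by efivar_ssdt_load(), unless the kernel is locked down.
 */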
static int __init efivar_ssdt_setup(char *str)
{
        int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

        if (ret)
                return ret;

        if (strlen(str) < sizeof(efivar_ssdt))
                memcpy(efivar_ssdt, str, strlen(str));
        else
                pr_warn("efivar_ssdt: name too long: %s\n", str);
        return 1;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);

static __init int efivar_ssdt_load(void)
{
        unsigned long name_size = 256;
        efi_char16_t *name = NULL;
        efi_status_t status;
        efi_guid_t guid;

        if (!efivar_ssdt[0])
                return 0;

        name = kzalloc(name_size, GFP_KERNEL);
        if (!name)
                return -ENOMEM;

        for (;;) {
                char utf8_name[EFIVAR_SSDT_NAME_MAX];
                unsigned long data_size = 0;
                void *data;
                int limit;

                status = efi.get_next_variable(&name_size, name, &guid);
                if (status == EFI_NOT_FOUND) {
                        break;
                } else if (status == EFI_BUFFER_TOO_SMALL) {
                        name = krealloc(name, name_size, GFP_KERNEL);
                        if (!name)
                                return -ENOMEM;
                        continue;
                }

                limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
                ucs2_as_utf8(utf8_name, name, limit - 1);
                if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
                        continue;

                pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);

                status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
                if (status != EFI_BUFFER_TOO_SMALL || !data_size)
                        return -EIO;

                data = kmalloc(data_size, GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                status = efi.get_variable(name, &guid, NULL, &data_size, data);
                if (status == EFI_SUCCESS) {
                        acpi_status ret = acpi_load_table(data, NULL);
                        if (ret)
                                pr_err("failed to load table: %u\n", ret);
                        else
                                continue;
                } else {
                        pr_err("failed to get var data: 0x%lx\n", status);
                }
                kfree(data);
        }
        return 0;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
#endif

#ifdef CONFIG_DEBUG_FS

#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

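/*
 * Expose any preserved EFI boot services code/data regions as read-only
 * blobs under /sys/kernel/debug/efi/.
 */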
static void __init efi_debugfs_init(void)
{
        struct dentry *efi_debugfs;
        efi_memory_desc_t *md;
        char name[32];
        int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
        int i = 0;

        efi_debugfs = debugfs_create_dir("efi", NULL);
        if (IS_ERR_OR_NULL(efi_debugfs))
                return;

        for_each_efi_memory_desc(md) {
                switch (md->type) {
                case EFI_BOOT_SERVICES_CODE:
                        snprintf(name, sizeof(name), "boot_services_code%d",
                                 type_count[md->type]++);
                        break;
                case EFI_BOOT_SERVICES_DATA:
                        snprintf(name, sizeof(name), "boot_services_data%d",
                                 type_count[md->type]++);
                        break;
                default:
                        continue;
                }

                if (i >= EFI_DEBUGFS_MAX_BLOBS) {
                        pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
                                EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
                        break;
                }

                debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
                debugfs_blob[i].data = memremap(md->phys_addr,
                                                debugfs_blob[i].size,
                                                MEMREMAP_WB);
                if (!debugfs_blob[i].data)
                        continue;

                debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
                i++;
        }
}
#else
static inline void efi_debugfs_init(void) {}
#endif

/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
        int error;

        if (!efi_enabled(EFI_RUNTIME_SERVICES))
                efi.runtime_supported_mask = 0;

        if (!efi_enabled(EFI_BOOT))
                return 0;

        if (efi.runtime_supported_mask) {
                /*
                 * Since we process only one efi_runtime_service() at a time, an
                 * ordered workqueue (which creates only one execution context)
                 * should suffice for all our needs.
                 */
                efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
                if (!efi_rts_wq) {
                        pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
                        clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                        efi.runtime_supported_mask = 0;
                        return 0;
                }
        }

        if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
                platform_device_register_simple("rtc-efi", 0, NULL, 0);

        /* We register the efi directory at /sys/firmware/efi */
        efi_kobj = kobject_create_and_add("efi", firmware_kobj);
        if (!efi_kobj) {
                pr_err("efi: Firmware registration failed.\n");
                error = -ENOMEM;
                goto err_destroy_wq;
        }

        if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
                                      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
                error = generic_ops_register();
                if (error)
                        goto err_put;
                efivar_ssdt_load();
                platform_device_register_simple("efivars", 0, NULL, 0);
        }

        error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
        if (error) {
                pr_err("efi: Sysfs attribute export failed with error %d.\n",
                       error);
                goto err_unregister;
        }

        error = efi_runtime_map_init(efi_kobj);
        if (error)
                goto err_remove_group;

        /* and the standard mountpoint for efivarfs */
        error = sysfs_create_mount_point(efi_kobj, "efivars");
        if (error) {
                pr_err("efivars: Subsystem registration failed.\n");
                goto err_remove_group;
        }

        if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
                efi_debugfs_init();

#ifdef CONFIG_EFI_COCO_SECRET
        if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
                platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif

        return 0;

err_remove_group:
        sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
        if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
                                      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
                generic_ops_unregister();
err_put:
        kobject_put(efi_kobj);
err_destroy_wq:
        if (efi_rts_wq)
                destroy_workqueue(efi_rts_wq);

        return error;
}

subsys_initcall(efisubsys_init);

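/*
 * Mark all EFI_MEMORY_MORE_RELIABLE regions in the EFI memory map as mirrored
 * memory in memblock and report how much of the total memory that covers.
 */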
void __init efi_find_mirror(void)
{
        efi_memory_desc_t *md;
        u64 mirror_size = 0, total_size = 0;

        if (!efi_enabled(EFI_MEMMAP))
                return;

        for_each_efi_memory_desc(md) {
                unsigned long long start = md->phys_addr;
                unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

                total_size += size;
                if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
                        memblock_mark_mirror(start, size);
                        mirror_size += size;
                }
        }
        if (mirror_size)
                pr_info("Memory: %lldM/%lldM mirrored memory\n",
                        mirror_size>>20, total_size>>20);
}

/*
 * Find the efi memory descriptor for a given physical address. Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
 */
int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
        efi_memory_desc_t *md;

        if (!efi_enabled(EFI_MEMMAP)) {
                pr_err_once("EFI_MEMMAP is not enabled.\n");
                return -EINVAL;
        }

        if (!out_md) {
                pr_err_once("out_md is null.\n");
                return -EINVAL;
        }

        for_each_efi_memory_desc(md) {
                u64 size;
                u64 end;

                size = md->num_pages << EFI_PAGE_SHIFT;
                end = md->phys_addr + size;
                if (phys_addr >= md->phys_addr && phys_addr < end) {
                        memcpy(out_md, md, sizeof(*out_md));
                        return 0;
                }
        }
        return -ENOENT;
}

/*
 * Calculate the highest address of an efi memory descriptor.
 */
u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
{
        u64 size = md->num_pages << EFI_PAGE_SHIFT;
        u64 end = md->phys_addr + size;
        return end;
}

void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}

/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
        if (!memblock_is_region_reserved(addr, size))
                memblock_reserve(addr, size);

        /*
         * Some architectures (x86) reserve all boot services ranges
         * until efi_free_boot_services() because of buggy firmware
         * implementations. This means the above memblock_reserve() is
         * superfluous on x86 and instead what it needs to do is
         * ensure the @start, @size is not freed.
         */
        efi_arch_mem_reserve(addr, size);
}

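/*
 * Configuration tables recognised on all EFI architectures;
 * architecture-specific tables are passed to efi_config_parse_tables()
 * separately via @arch_tables.
 */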
static const efi_config_table_type_t common_tables[] __initconst = {
        {ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0" },
        {ACPI_TABLE_GUID, &efi.acpi, "ACPI" },
        {SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS" },
        {SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0" },
        {EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT" },
        {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
        {LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
        {LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
        {LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog" },
        {LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
        {LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
        {EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
#ifdef CONFIG_EFI_RCI2_TABLE
        {DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
#endif
#ifdef CONFIG_LOAD_UEFI_KEYS
        {LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar" },
#endif
#ifdef CONFIG_EFI_COCO_SECRET
        {LINUX_EFI_COCO_SECRET_AREA_GUID, &efi.coco_secret, "CocoSecret" },
#endif
        {},
};

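/*
 * Record the address of a configuration table whose GUID matches one of the
 * entries in @table_types; returns 1 on a match and 0 otherwise.
 */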
static __init int match_config_table(const efi_guid_t *guid,
                                     unsigned long table,
                                     const efi_config_table_type_t *table_types)
{
        int i;

        for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
                if (!efi_guidcmp(*guid, table_types[i].guid)) {
                        *(table_types[i].ptr) = table;
                        if (table_types[i].name[0])
                                pr_cont("%s=0x%lx ",
                                        table_types[i].name, table);
                        return 1;
                }
        }

        return 0;
}

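/*
 * Walk the firmware's configuration table array, record the tables we
 * recognise and consume the Linux-specific ones (RNG seed, MEMRESERVE,
 * RTPROP and INITRD) right away.
 */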
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
                                   int count,
                                   const efi_config_table_type_t *arch_tables)
{
        const efi_config_table_64_t *tbl64 = (void *)config_tables;
        const efi_config_table_32_t *tbl32 = (void *)config_tables;
        const efi_guid_t *guid;
        unsigned long table;
        int i;

        pr_info("");
        for (i = 0; i < count; i++) {
                if (!IS_ENABLED(CONFIG_X86)) {
                        guid = &config_tables[i].guid;
                        table = (unsigned long)config_tables[i].table;
                } else if (efi_enabled(EFI_64BIT)) {
                        guid = &tbl64[i].guid;
                        table = tbl64[i].table;

                        if (IS_ENABLED(CONFIG_X86_32) &&
                            tbl64[i].table > U32_MAX) {
                                pr_cont("\n");
                                pr_err("Table located above 4GB, disabling EFI.\n");
                                return -EINVAL;
                        }
                } else {
                        guid = &tbl32[i].guid;
                        table = tbl32[i].table;
                }

                if (!match_config_table(guid, table, common_tables) && arch_tables)
                        match_config_table(guid, table, arch_tables);
        }
        pr_cont("\n");
        set_bit(EFI_CONFIG_TABLES, &efi.flags);

        if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
                struct linux_efi_random_seed *seed;
                u32 size = 0;

                seed = early_memremap(efi_rng_seed, sizeof(*seed));
                if (seed != NULL) {
                        size = min_t(u32, seed->size, SZ_1K); // sanity check
                        early_memunmap(seed, sizeof(*seed));
                } else {
                        pr_err("Could not map UEFI random seed!\n");
                }
                if (size > 0) {
                        seed = early_memremap(efi_rng_seed,
                                              sizeof(*seed) + size);
                        if (seed != NULL) {
                                add_bootloader_randomness(seed->bits, size);
                                memzero_explicit(seed->bits, size);
                                early_memunmap(seed, sizeof(*seed) + size);
                        } else {
                                pr_err("Could not map UEFI random seed!\n");
                        }
                }
        }

        if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
                efi_memattr_init();

        efi_tpm_eventlog_init();

        if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
                unsigned long prsv = mem_reserve;

                while (prsv) {
                        struct linux_efi_memreserve *rsv;
                        u8 *p;

                        /*
                         * Just map a full page: that is what we will get
                         * anyway, and it permits us to map the entire entry
                         * before knowing its size.
                         */
                        p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
                                           PAGE_SIZE);
                        if (p == NULL) {
                                pr_err("Could not map UEFI memreserve entry!\n");
                                return -ENOMEM;
                        }

                        rsv = (void *)(p + prsv % PAGE_SIZE);

                        /* reserve the entry itself */
                        memblock_reserve(prsv,
                                         struct_size(rsv, entry, rsv->size));

                        for (i = 0; i < atomic_read(&rsv->count); i++) {
                                memblock_reserve(rsv->entry[i].base,
                                                 rsv->entry[i].size);
                        }

                        prsv = rsv->next;
                        early_memunmap(p, PAGE_SIZE);
                }
        }

        if (rt_prop != EFI_INVALID_TABLE_ADDR) {
                efi_rt_properties_table_t *tbl;

                tbl = early_memremap(rt_prop, sizeof(*tbl));
                if (tbl) {
                        efi.runtime_supported_mask &= tbl->runtime_services_supported;
                        early_memunmap(tbl, sizeof(*tbl));
                }
        }

        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
            initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
                struct linux_efi_initrd *tbl;

                tbl = early_memremap(initrd, sizeof(*tbl));
                if (tbl) {
                        phys_initrd_start = tbl->base;
                        phys_initrd_size = tbl->size;
                        early_memunmap(tbl, sizeof(*tbl));
                }
        }

        return 0;
}

int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
                                   int min_major_version)
{
        if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
                pr_err("System table signature incorrect!\n");
                return -EINVAL;
        }

        if ((systab_hdr->revision >> 16) < min_major_version)
                pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
                       systab_hdr->revision >> 16,
                       systab_hdr->revision & 0xffff,
                       min_major_version);

        return 0;
}

#ifndef CONFIG_IA64
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
                                                size_t size)
{
        const efi_char16_t *ret;

        ret = early_memremap_ro(fw_vendor, size);
        if (!ret)
                pr_err("Could not map the firmware vendor!\n");
        return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
        early_memunmap((void *)fw_vendor, size);
}
#else
#define map_fw_vendor(p, s) __va(p)
#define unmap_fw_vendor(v, s)
#endif

void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
                                     unsigned long fw_vendor)
{
        char vendor[100] = "unknown";
        const efi_char16_t *c16;
        size_t i;

        c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
        if (c16) {
                for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
                        vendor[i] = c16[i];
                vendor[i] = '\0';

                unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
        }

        pr_info("EFI v%u.%.02u by %s\n",
                systab_hdr->revision >> 16,
                systab_hdr->revision & 0xffff,
                vendor);

        if (IS_ENABLED(CONFIG_X86_64) &&
            systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
            !strcmp(vendor, "Apple")) {
                pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
                efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
        }
}

static __initdata char memory_type_name[][13] = {
        "Reserved",
        "Loader Code",
        "Loader Data",
        "Boot Code",
        "Boot Data",
        "Runtime Code",
        "Runtime Data",
        "Conventional",
        "Unusable",
        "ACPI Reclaim",
        "ACPI Mem NVS",
        "MMIO",
        "MMIO Port",
        "PAL Code",
        "Persistent",
};

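/*
 * Format an EFI memory descriptor's type and attribute bits into a
 * human-readable string in @buf, which is at most @size bytes long.
 */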
char * __init efi_md_typeattr_format(char *buf, size_t size,
                                     const efi_memory_desc_t *md)
{
        char *pos;
        int type_len;
        u64 attr;

        pos = buf;
        if (md->type >= ARRAY_SIZE(memory_type_name))
                type_len = snprintf(pos, size, "[type=%u", md->type);
        else
                type_len = snprintf(pos, size, "[%-*s",
                                    (int)(sizeof(memory_type_name[0]) - 1),
                                    memory_type_name[md->type]);
        if (type_len >= size)
                return buf;

        pos += type_len;
        size -= type_len;

        attr = md->attribute;
        if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
                     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
                     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
                     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
                     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
                snprintf(pos, size, "|attr=0x%016llx]",
                         (unsigned long long)attr);
        else
                snprintf(pos, size,
                         "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
                         attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
                         attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
                         attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
                         attr & EFI_MEMORY_SP ? "SP" : "",
                         attr & EFI_MEMORY_NV ? "NV" : "",
                         attr & EFI_MEMORY_XP ? "XP" : "",
                         attr & EFI_MEMORY_RP ? "RP" : "",
                         attr & EFI_MEMORY_WP ? "WP" : "",
                         attr & EFI_MEMORY_RO ? "RO" : "",
                         attr & EFI_MEMORY_UCE ? "UCE" : "",
                         attr & EFI_MEMORY_WB ? "WB" : "",
                         attr & EFI_MEMORY_WT ? "WT" : "",
                         attr & EFI_MEMORY_WC ? "WC" : "",
                         attr & EFI_MEMORY_UC ? "UC" : "");
        return buf;
}

/*
 * IA64 has a funky EFI memory map that doesn't work the same way as
 * other architectures.
 */
#ifndef CONFIG_IA64
/*
 * efi_mem_attributes - lookup memmap attributes for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering
 * @phys_addr. Returns the EFI memory attributes if the region
 * was found in the memory map, 0 otherwise.
 */
u64 efi_mem_attributes(unsigned long phys_addr)
{
        efi_memory_desc_t *md;

        if (!efi_enabled(EFI_MEMMAP))
                return 0;

        for_each_efi_memory_desc(md) {
                if ((md->phys_addr <= phys_addr) &&
                    (phys_addr < (md->phys_addr +
                                  (md->num_pages << EFI_PAGE_SHIFT))))
                        return md->attribute;
        }
        return 0;
}

/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -EINVAL if it was not found, or -ENOTSUPP if there is no EFI
 * memory map.
 */
int efi_mem_type(unsigned long phys_addr)
{
        const efi_memory_desc_t *md;

        if (!efi_enabled(EFI_MEMMAP))
                return -ENOTSUPP;

        for_each_efi_memory_desc(md) {
                if ((md->phys_addr <= phys_addr) &&
                    (phys_addr < (md->phys_addr +
                                  (md->num_pages << EFI_PAGE_SHIFT))))
                        return md->type;
        }
        return -EINVAL;
}
#endif

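/* translate an EFI status code into a negative errno value (0 for EFI_SUCCESS) */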
int efi_status_to_err(efi_status_t status)
{
        int err;

        switch (status) {
        case EFI_SUCCESS:
                err = 0;
                break;
        case EFI_INVALID_PARAMETER:
                err = -EINVAL;
                break;
        case EFI_OUT_OF_RESOURCES:
                err = -ENOSPC;
                break;
        case EFI_DEVICE_ERROR:
                err = -EIO;
                break;
        case EFI_WRITE_PROTECTED:
                err = -EROFS;
                break;
        case EFI_SECURITY_VIOLATION:
                err = -EACCES;
                break;
        case EFI_NOT_FOUND:
                err = -ENOENT;
                break;
        case EFI_ABORTED:
                err = -EINTR;
                break;
        default:
                err = -EINVAL;
        }

        return err;
}
EXPORT_SYMBOL_GPL(efi_status_to_err);

static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

static int __init efi_memreserve_map_root(void)
{
        if (mem_reserve == EFI_INVALID_TABLE_ADDR)
                return -ENODEV;

        efi_memreserve_root = memremap(mem_reserve,
                                       sizeof(*efi_memreserve_root),
                                       MEMREMAP_WB);
        if (WARN_ON_ONCE(!efi_memreserve_root))
                return -ENOMEM;
        return 0;
}

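/*
 * Insert a "reserved" resource for [addr, addr + size) into the iomem
 * resource tree, nested inside the "System RAM" region that contains it.
 */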
static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
        struct resource *res, *parent;
        int ret;

        res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
        if (!res)
                return -ENOMEM;

        res->name = "reserved";
        res->flags = IORESOURCE_MEM;
        res->start = addr;
        res->end = addr + size - 1;

        /* we expect a conflict with a 'System RAM' region */
        parent = request_resource_conflict(&iomem_resource, res);
        ret = parent ? request_resource(parent, res) : 0;

        /*
         * Given that efi_mem_reserve_iomem() can be called at any
         * time, only call memblock_reserve() if the architecture
         * keeps the infrastructure around.
         */
        if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
                memblock_reserve(addr, size);

        return ret;
}

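/*
 * Record a memory reservation in the LINUX_EFI_MEMRESERVE table so that it
 * survives kexec: the next kernel re-reserves every listed region when it
 * parses the table in efi_config_parse_tables().
 */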
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
        struct linux_efi_memreserve *rsv;
        unsigned long prsv;
        int rc, index;

        if (efi_memreserve_root == (void *)ULONG_MAX)
                return -ENODEV;

        if (!efi_memreserve_root) {
                rc = efi_memreserve_map_root();
                if (rc)
                        return rc;
        }

        /* first try to find a slot in an existing linked list entry */
        for (prsv = efi_memreserve_root->next; prsv; ) {
                rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
                index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
                if (index < rsv->size) {
                        rsv->entry[index].base = addr;
                        rsv->entry[index].size = size;

                        memunmap(rsv);
                        return efi_mem_reserve_iomem(addr, size);
                }
                prsv = rsv->next;
                memunmap(rsv);
        }

        /* no slot found - allocate a new linked list entry */
        rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
        if (!rsv)
                return -ENOMEM;

        rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
        if (rc) {
                free_page((unsigned long)rsv);
                return rc;
        }

        /*
         * The memremap() call above assumes that a linux_efi_memreserve entry
         * never crosses a page boundary, so let's ensure that this remains true
         * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
         * using SZ_4K explicitly in the size calculation below.
         */
        rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
        atomic_set(&rsv->count, 1);
        rsv->entry[0].base = addr;
        rsv->entry[0].size = size;

        spin_lock(&efi_mem_reserve_persistent_lock);
        rsv->next = efi_memreserve_root->next;
        efi_memreserve_root->next = __pa(rsv);
        spin_unlock(&efi_mem_reserve_persistent_lock);

        return efi_mem_reserve_iomem(addr, size);
}

static int __init efi_memreserve_root_init(void)
{
        if (efi_memreserve_root)
                return 0;
        if (efi_memreserve_map_root())
                efi_memreserve_root = (void *)ULONG_MAX;
        return 0;
}
early_initcall(efi_memreserve_root_init);

#ifdef CONFIG_KEXEC
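/*
 * Refill the EFI RNG seed table with fresh entropy right before kexec so
 * that the next kernel boots with a usable seed of its own.
 */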
static int update_efi_random_seed(struct notifier_block *nb,
                                  unsigned long code, void *unused)
{
        struct linux_efi_random_seed *seed;
        u32 size = 0;

        if (!kexec_in_progress)
                return NOTIFY_DONE;

        seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
        if (seed != NULL) {
                size = min(seed->size, EFI_RANDOM_SEED_SIZE);
                memunmap(seed);
        } else {
                pr_err("Could not map UEFI random seed!\n");
        }
        if (size > 0) {
                seed = memremap(efi_rng_seed, sizeof(*seed) + size,
                                MEMREMAP_WB);
                if (seed != NULL) {
                        seed->size = size;
                        get_random_bytes(seed->bits, seed->size);
                        memunmap(seed);
                } else {
                        pr_err("Could not map UEFI random seed!\n");
                }
        }
        return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
        .notifier_call = update_efi_random_seed,
};

static int __init register_update_efi_random_seed(void)
{
        if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
                return 0;
        return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif