// SPDX-License-Identifier: GPL-2.0-only
/*
 * efi.c - EFI subsystem
 *
 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
 *
 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
 * determine that the system supports EFI.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
#include <linux/security.h>

#include <asm/early_ioremap.h>

struct efi __read_mostly efi = {
	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
	.acpi			= EFI_INVALID_TABLE_ADDR,
	.acpi20			= EFI_INVALID_TABLE_ADDR,
	.smbios			= EFI_INVALID_TABLE_ADDR,
	.smbios3		= EFI_INVALID_TABLE_ADDR,
	.esrt			= EFI_INVALID_TABLE_ADDR,
	.tpm_log		= EFI_INVALID_TABLE_ADDR,
	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	.coco_secret		= EFI_INVALID_TABLE_ADDR,
#endif
};
EXPORT_SYMBOL(efi);

unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;

struct mm_struct efi_mm = {
	.mm_rb			= RB_ROOT,
	.mm_users		= ATOMIC_INIT(2),
	.mm_count		= ATOMIC_INIT(1),
	.write_protect_seq      = SEQCNT_ZERO(efi_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(efi_mm)
	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
};

struct workqueue_struct *efi_rts_wq;

static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
static int __init setup_noefi(char *arg)
{
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);

bool efi_runtime_disabled(void)
{
	return disable_runtime;
}

bool __pure __efi_soft_reserve_enabled(void)
{
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}

static int __init parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (parse_option_str(str, "debug"))
		set_bit(EFI_DBG, &efi.flags);

	if (parse_option_str(str, "noruntime"))
		disable_runtime = true;

	if (parse_option_str(str, "runtime"))
		disable_runtime = false;

	if (parse_option_str(str, "nosoftreserve"))
		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);

	return 0;
}
early_param("efi", parse_efi_cmdline);
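
/*
 * For illustration (only the options handled above exist; the combinations
 * are just examples): the "efi=" parameter takes a comma-separated list
 * that parse_efi_cmdline() scans with parse_option_str(), e.g.
 *
 *	efi=debug,nosoftreserve
 *	efi=noruntime
 *
 * whereas a bare "noefi" goes through setup_noefi() and only disables the
 * EFI runtime services.
 */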

struct kobject *efi_kobj;

/*
 * Let's not leave out systab information that snuck into
 * the efivars driver.
 * Note: do not add more fields to the systab sysfs file, as that would
 * break the sysfs one-value-per-file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
		str = efi_systab_show_arch(str);

	return str - buf;
}

static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
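
/*
 * Illustrative sketch of the resulting sysfs file (addresses are made up);
 * each "NAME=address" line corresponds to one branch of systab_show():
 *
 *	# cat /sys/firmware/efi/systab
 *	ACPI20=0x7e57e014
 *	ACPI=0x7e57e000
 *	SMBIOS3=0x7e33f000
 *	SMBIOS=0x7e33d000
 */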

static ssize_t fw_platform_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
	__ATTR_RO(fw_platform_size);

static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,
	NULL,
};

umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
				   int n)
{
	return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
};

static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

static int generic_ops_register(void)
{
	generic_ops.get_variable = efi.get_variable;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
		generic_ops.set_variable = efi.set_variable;
		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	}
	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
}

static void generic_ops_unregister(void)
{
	efivars_unregister(&generic_efivars);
}

#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX	16
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
{
	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

	if (ret)
		return ret;

	if (strlen(str) < sizeof(efivar_ssdt))
		memcpy(efivar_ssdt, str, strlen(str));
	else
		pr_warn("efivar_ssdt: name too long: %s\n", str);
	return 1;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);
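
/*
 * Hypothetical usage sketch (the variable name is made up): booting with
 *
 *	efivar_ssdt=MYSSDT
 *
 * makes efivar_ssdt_load() below look for EFI variables named "MYSSDT"
 * (any vendor GUID) and hand their contents to acpi_load_table() as SSDT
 * overlays, unless the kernel is locked down.
 */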

static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
				   unsigned long name_size, void *data)
{
	struct efivar_entry *entry;
	struct list_head *list = data;
	char utf8_name[EFIVAR_SSDT_NAME_MAX];
	int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);

	ucs2_as_utf8(utf8_name, name, limit - 1);
	if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
		return 0;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	memcpy(entry->var.VariableName, name, name_size);
	memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));

	efivar_entry_add(entry, list);

	return 0;
}

static __init int efivar_ssdt_load(void)
{
	LIST_HEAD(entries);
	struct efivar_entry *entry, *aux;
	unsigned long size;
	void *data;
	int ret;

	if (!efivar_ssdt[0])
		return 0;

	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);

	list_for_each_entry_safe(entry, aux, &entries, list) {
		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
			&entry->var.VendorGuid);

		list_del(&entry->list);

		ret = efivar_entry_size(entry, &size);
		if (ret) {
			pr_err("failed to get var size\n");
			goto free_entry;
		}

		data = kmalloc(size, GFP_KERNEL);
		if (!data) {
			ret = -ENOMEM;
			goto free_entry;
		}

		ret = efivar_entry_get(entry, NULL, &size, data);
		if (ret) {
			pr_err("failed to get var data\n");
			goto free_data;
		}

		ret = acpi_load_table(data, NULL);
		if (ret) {
			pr_err("failed to load table: %d\n", ret);
			goto free_data;
		}

		goto free_entry;

free_data:
		kfree(data);

free_entry:
		kfree(entry);
	}

	return ret;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
#endif

#ifdef CONFIG_DEBUG_FS

#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

static void __init efi_debugfs_init(void)
{
	struct dentry *efi_debugfs;
	efi_memory_desc_t *md;
	char name[32];
	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
	int i = 0;

	efi_debugfs = debugfs_create_dir("efi", NULL);
	if (IS_ERR_OR_NULL(efi_debugfs))
		return;

	for_each_efi_memory_desc(md) {
		switch (md->type) {
		case EFI_BOOT_SERVICES_CODE:
			snprintf(name, sizeof(name), "boot_services_code%d",
				 type_count[md->type]++);
			break;
		case EFI_BOOT_SERVICES_DATA:
			snprintf(name, sizeof(name), "boot_services_data%d",
				 type_count[md->type]++);
			break;
		default:
			continue;
		}

		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
			break;
		}

		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
		debugfs_blob[i].data = memremap(md->phys_addr,
						debugfs_blob[i].size,
						MEMREMAP_WB);
		if (!debugfs_blob[i].data)
			continue;

		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
		i++;
	}
}
#else
static inline void efi_debugfs_init(void) {}
#endif

/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
	int error;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
		if (!efi_rts_wq) {
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;
			return 0;
		}
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		pr_err("efi: Firmware registration failed.\n");
		destroy_workqueue(efi_rts_wq);
		return -ENOMEM;
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
		error = generic_ops_register();
		if (error)
			goto err_put;
		efivar_ssdt_load();
		platform_device_register_simple("efivars", 0, NULL, 0);
	}

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		pr_err("efi: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	error = efi_runtime_map_init(efi_kobj);
	if (error)
		goto err_remove_group;

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
	if (error) {
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;
	}

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
		efi_debugfs_init();

#ifdef CONFIG_EFI_COCO_SECRET
	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
		platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif

	return 0;

err_remove_group:
	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
		generic_ops_unregister();
err_put:
	kobject_put(efi_kobj);
	destroy_workqueue(efi_rts_wq);
	return error;
}

subsys_initcall(efisubsys_init);

/*
 * Find the efi memory descriptor for a given physical address.  Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
 */
int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP)) {
		pr_err_once("EFI_MEMMAP is not enabled.\n");
		return -EINVAL;
	}

	if (!out_md) {
		pr_err_once("out_md is null.\n");
		return -EINVAL;
	}

	for_each_efi_memory_desc(md) {
		u64 size;
		u64 end;

		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;
		if (phys_addr >= md->phys_addr && phys_addr < end) {
			memcpy(out_md, md, sizeof(*out_md));
			return 0;
		}
	}
	return -ENOENT;
}
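
/*
 * Minimal usage sketch (phys_addr is assumed to come from the caller):
 *
 *	efi_memory_desc_t md;
 *
 *	if (!efi_mem_desc_lookup(phys_addr, &md))
 *		pr_info("covered by an EFI region of type %u\n", md.type);
 */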

/*
 * Calculate the highest address of an efi memory descriptor.
 */
u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
{
	u64 size = md->num_pages << EFI_PAGE_SHIFT;
	u64 end = md->phys_addr + size;
	return end;
}

void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}

/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @start, @size is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}
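
/*
 * Illustrative sketch only (table_pa and table_size are hypothetical): a
 * driver that has located its firmware data via an EFI config table entry
 * keeps the region from being handed back to the page allocator with:
 *
 *	efi_mem_reserve(table_pa, table_size);
 *
 * Note the __init annotation above: the call must happen from the caller's
 * own early init path.
 */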

static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
	{LINUX_EFI_TPM_FINAL_LOG_GUID,		&efi.tpm_final_log,	"TPMFinalLog"	},
	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
#endif
#ifdef CONFIG_LOAD_UEFI_KEYS
	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	{LINUX_EFI_COCO_SECRET_AREA_GUID,	&efi.coco_secret,	"CocoSecret"	},
#endif
	{},
};

static __init int match_config_table(const efi_guid_t *guid,
				     unsigned long table,
				     const efi_config_table_type_t *table_types)
{
	int i;

	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
		if (!efi_guidcmp(*guid, table_types[i].guid)) {
			*(table_types[i].ptr) = table;
			if (table_types[i].name[0])
				pr_cont("%s=0x%lx ",
					table_types[i].name, table);
			return 1;
		}
	}

	return 0;
}

int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	pr_info("");
	for (i = 0; i < count; i++) {
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		if (!match_config_table(guid, table, common_tables) && arch_tables)
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = READ_ONCE(seed->size);
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		if (size > 0) {
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				pr_notice("seeding entropy pool\n");
				add_bootloader_randomness(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv,
					 struct_size(rsv, entry, rsv->size));

			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	return 0;
}

int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
				   int min_major_version)
{
	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}

	if ((systab_hdr->revision >> 16) < min_major_version)
		pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
		       systab_hdr->revision >> 16,
		       systab_hdr->revision & 0xffff,
		       min_major_version);

	return 0;
}

#ifndef CONFIG_IA64
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}
#else
#define map_fw_vendor(p, s)	__va(p)
#define unmap_fw_vendor(v, s)
#endif

void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	pr_info("EFI v%u.%.02u by %s\n",
		systab_hdr->revision >> 16,
		systab_hdr->revision & 0xffff,
		vendor);

	if (IS_ENABLED(CONFIG_X86_64) &&
	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
	    !strcmp(vendor, "Apple")) {
		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
	}
}

static __initdata char memory_type_name[][13] = {
	"Reserved",
	"Loader Code",
	"Loader Data",
	"Boot Code",
	"Boot Data",
	"Runtime Code",
	"Runtime Data",
	"Conventional",
	"Unusable",
	"ACPI Reclaim",
	"ACPI Mem NVS",
	"MMIO",
	"MMIO Port",
	"PAL Code",
	"Persistent",
};

char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md)
{
	char *pos;
	int type_len;
	u64 attr;

	pos = buf;
	if (md->type >= ARRAY_SIZE(memory_type_name))
		type_len = snprintf(pos, size, "[type=%u", md->type);
	else
		type_len = snprintf(pos, size, "[%-*s",
				    (int)(sizeof(memory_type_name[0]) - 1),
				    memory_type_name[md->type]);
	if (type_len >= size)
		return buf;

	pos += type_len;
	size -= type_len;

	attr = md->attribute;
	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
		snprintf(pos, size, "|attr=0x%016llx]",
			 (unsigned long long)attr);
	else
		snprintf(pos, size,
			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
			 attr & EFI_MEMORY_CPU_CRYPTO		? "CC"  : "",
			 attr & EFI_MEMORY_SP			? "SP"  : "",
			 attr & EFI_MEMORY_NV			? "NV"  : "",
			 attr & EFI_MEMORY_XP			? "XP"  : "",
			 attr & EFI_MEMORY_RP			? "RP"  : "",
			 attr & EFI_MEMORY_WP			? "WP"  : "",
			 attr & EFI_MEMORY_RO			? "RO"  : "",
			 attr & EFI_MEMORY_UCE			? "UCE" : "",
			 attr & EFI_MEMORY_WB			? "WB"  : "",
			 attr & EFI_MEMORY_WT			? "WT"  : "",
			 attr & EFI_MEMORY_WC			? "WC"  : "",
			 attr & EFI_MEMORY_UC			? "UC"  : "");
	return buf;
}
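
/*
 * For illustration, the string this helper builds for the memory map dump
 * looks roughly like (exact spacing depends on the snprintf widths above):
 *
 *	[Runtime Data|RUN|  |  |  |  |  |  |  |  |   |WB|WT|WC|UC]
 *
 * i.e. the type name padded against memory_type_name[], then one column
 * per recognised attribute bit, or "|attr=0x...]" when unknown bits are set.
 */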

/*
 * IA64 has a funky EFI memory map that doesn't work the same way as
 * other architectures.
 */
#ifndef CONFIG_IA64
/*
 * efi_mem_attributes - lookup memmap attributes for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering
 * @phys_addr. Returns the EFI memory attributes if the region
 * was found in the memory map, 0 otherwise.
 */
u64 efi_mem_attributes(unsigned long phys_addr)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return 0;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
		    (md->num_pages << EFI_PAGE_SHIFT))))
			return md->attribute;
	}
	return 0;
}

/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -EINVAL otherwise.
 */
int efi_mem_type(unsigned long phys_addr)
{
	const efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return -ENOTSUPP;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->type;
	}
	return -EINVAL;
}
#endif

int efi_status_to_err(efi_status_t status)
{
	int err;

	switch (status) {
	case EFI_SUCCESS:
		err = 0;
		break;
	case EFI_INVALID_PARAMETER:
		err = -EINVAL;
		break;
	case EFI_OUT_OF_RESOURCES:
		err = -ENOSPC;
		break;
	case EFI_DEVICE_ERROR:
		err = -EIO;
		break;
	case EFI_WRITE_PROTECTED:
		err = -EROFS;
		break;
	case EFI_SECURITY_VIOLATION:
		err = -EACCES;
		break;
	case EFI_NOT_FOUND:
		err = -ENOENT;
		break;
	case EFI_ABORTED:
		err = -EINTR;
		break;
	default:
		err = -EINVAL;
	}

	return err;
}
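
/*
 * Usage sketch (the status value comes from whichever runtime service was
 * invoked; the call shown is only illustrative): callers typically funnel
 * the firmware's efi_status_t straight through this helper, roughly:
 *
 *	status = efi.get_variable(name, &vendor, &attr, &size, data);
 *	if (status != EFI_SUCCESS)
 *		return efi_status_to_err(status);
 */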

static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

static int __init efi_memreserve_map_root(void)
{
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
				       MEMREMAP_WB);
	if (WARN_ON_ONCE(!efi_memreserve_root))
		return -ENOMEM;
	return 0;
}

static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
	struct resource *res, *parent;
	int ret;

	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return -ENOMEM;

	res->name	= "reserved";
	res->flags	= IORESOURCE_MEM;
	res->start	= addr;
	res->end	= addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	ret = parent ? request_resource(parent, res) : 0;

	/*
	 * Given that efi_mem_reserve_iomem() can be called at any
	 * time, only call memblock_reserve() if the architecture
	 * keeps the infrastructure around.
	 */
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
		memblock_reserve(addr, size);

	return ret;
}

int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			memunmap(rsv);
			return efi_mem_reserve_iomem(addr, size);
		}
		prsv = rsv->next;
		memunmap(rsv);
	}

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
	if (rc) {
		free_page((unsigned long)rsv);
		return rc;
	}

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
}
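
/*
 * Usage sketch (base and size are whatever region the caller must keep
 * alive across kexec): unlike efi_mem_reserve(), this helper may be called
 * after init; it records the range in the LINUX_EFI_MEMRESERVE_TABLE list
 * that a kexec'd kernel walks in efi_config_parse_tables() above:
 *
 *	if (efi_mem_reserve_persistent(base, size))
 *		pr_warn("could not persist reservation\n");
 */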

static int __init efi_memreserve_root_init(void)
{
	if (efi_memreserve_root)
		return 0;
	if (efi_memreserve_map_root())
		efi_memreserve_root = (void *)ULONG_MAX;
	return 0;
}
early_initcall(efi_memreserve_root_init);

#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	struct linux_efi_random_seed *seed;
	u32 size = 0;

	if (!kexec_in_progress)
		return NOTIFY_DONE;

	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
	if (seed != NULL) {
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		memunmap(seed);
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
				MEMREMAP_WB);
		if (seed != NULL) {
			seed->size = size;
			get_random_bytes(seed->bits, seed->size);
			memunmap(seed);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};

static int __init register_update_efi_random_seed(void)
{
	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
		return 0;
	return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif