// SPDX-License-Identifier: GPL-2.0-only
/*
 * apei-base.c - ACPI Platform Error Interface (APEI) supporting
 * infrastructure
 *
 * APEI allows reporting errors (for example from the chipset) to the
 * operating system. This improves NMI handling especially. In
 * addition it supports error serialization and error injection.
 *
 * For more information about APEI, please refer to ACPI Specification
 * version 4.0, chapter 17.
 *
 * This file implements common functions used by more than one APEI
 * table, including the interpreter framework for ERST and EINJ, and
 * resource management for APEI registers.
 *
 * Copyright (C) 2009, Intel Corp.
 *	Author: Huang Ying <ying.huang@intel.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include "apei-internal.h"

#define APEI_PFX "APEI: "

/*
 * APEI ERST (Error Record Serialization Table) and EINJ (Error
 * INJection) interpreter framework.
 */
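/*
 * An illustrative sketch (not taken verbatim from any in-tree driver)
 * of how a table driver such as ERST or EINJ drives this framework:
 *
 *	struct apei_exec_context ctx;
 *
 *	apei_exec_ctx_init(&ctx, ins_table, ARRAY_SIZE(ins_table),
 *			   action_table, entries);
 *	apei_exec_ctx_set_input(&ctx, param);
 *	rc = apei_exec_run(&ctx, action);
 *
 * Here "ins_table", "action_table", "param" and "action" stand for
 * driver-specific values; see erst.c and einj.c for the real callers.
 */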

#define APEI_EXEC_PRESERVE_REGISTER	0x1

void apei_exec_ctx_init(struct apei_exec_context *ctx,
			struct apei_exec_ins_type *ins_table,
			u32 instructions,
			struct acpi_whea_header *action_table,
			u32 entries)
{
	ctx->ins_table = ins_table;
	ctx->instructions = instructions;
	ctx->action_table = action_table;
	ctx->entries = entries;
}
EXPORT_SYMBOL_GPL(apei_exec_ctx_init);

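/*
 * Read the register region of an action table entry and return the
 * raw value shifted by the entry's bit offset and masked with the
 * entry's mask.
 */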
int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
{
	int rc;

	rc = apei_read(val, &entry->register_region);
	if (rc)
		return rc;
	*val >>= entry->register_region.bit_offset;
	*val &= entry->mask;

	return 0;
}

int apei_exec_read_register(struct apei_exec_context *ctx,
			    struct acpi_whea_header *entry)
{
	int rc;
	u64 val = 0;

	rc = __apei_exec_read_register(entry, &val);
	if (rc)
		return rc;
	ctx->value = val;

	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register);

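/*
 * READ_REGISTER_VALUE: read the register and set ctx->value to 1 if
 * the value read matches the expected entry->value, 0 otherwise.
 */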
int apei_exec_read_register_value(struct apei_exec_context *ctx,
				  struct acpi_whea_header *entry)
{
	int rc;

	rc = apei_exec_read_register(ctx, entry);
	if (rc)
		return rc;
	ctx->value = (ctx->value == entry->value);

	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register_value);

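/*
 * Write a value (masked and shifted by the entry's bit offset) into
 * the register region of an action table entry. If the entry has the
 * PRESERVE_REGISTER flag set, the bits outside the entry's mask are
 * read back first and written out unchanged (read-modify-write).
 */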
int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
{
	int rc;

	val &= entry->mask;
	val <<= entry->register_region.bit_offset;
	if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
		u64 valr = 0;
		rc = apei_read(&valr, &entry->register_region);
		if (rc)
			return rc;
		valr &= ~(entry->mask << entry->register_region.bit_offset);
		val |= valr;
	}
	rc = apei_write(val, &entry->register_region);

	return rc;
}

int apei_exec_write_register(struct apei_exec_context *ctx,
			     struct acpi_whea_header *entry)
{
	return __apei_exec_write_register(entry, ctx->value);
}
EXPORT_SYMBOL_GPL(apei_exec_write_register);

int apei_exec_write_register_value(struct apei_exec_context *ctx,
				   struct acpi_whea_header *entry)
{
	ctx->value = entry->value;

	return apei_exec_write_register(ctx, entry);
}
EXPORT_SYMBOL_GPL(apei_exec_write_register_value);

int apei_exec_noop(struct apei_exec_context *ctx,
		   struct acpi_whea_header *entry)
{
	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_noop);

/*
 * Interpret the specified action. Go through the whole action table
 * and execute all instructions belonging to the action.
 */
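/*
 * An instruction's "run" callback returns a negative errno on failure,
 * APEI_EXEC_SET_IP if it has already updated ctx->ip itself (e.g. a
 * GOTO-style instruction), or another non-negative value to let the
 * interpreter advance ctx->ip to the next instruction.
 */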
int __apei_exec_run(struct apei_exec_context *ctx, u8 action,
		    bool optional)
{
	int rc = -ENOENT;
	u32 i, ip;
	struct acpi_whea_header *entry;
	apei_exec_ins_func_t run;

	ctx->ip = 0;

	/*
	 * "ip" is the instruction pointer of the current instruction,
	 * "ctx->ip" specifies the next instruction to be executed, and
	 * an instruction's "run" function may change "ctx->ip" to
	 * implement "goto" semantics.
	 */
rewind:
	ip = 0;
	for (i = 0; i < ctx->entries; i++) {
		entry = &ctx->action_table[i];
		if (entry->action != action)
			continue;
		if (ip == ctx->ip) {
			if (entry->instruction >= ctx->instructions ||
			    !ctx->ins_table[entry->instruction].run) {
				pr_warn(FW_WARN APEI_PFX
					"Invalid action table, unknown instruction type: %d\n",
					entry->instruction);
				return -EINVAL;
			}
			run = ctx->ins_table[entry->instruction].run;
			rc = run(ctx, entry);
			if (rc < 0)
				return rc;
			else if (rc != APEI_EXEC_SET_IP)
				ctx->ip++;
		}
		ip++;
		if (ctx->ip < ip)
			goto rewind;
	}

	return !optional && rc < 0 ? rc : 0;
}
EXPORT_SYMBOL_GPL(__apei_exec_run);

typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
				      struct acpi_whea_header *entry,
				      void *data);

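/*
 * Walk every entry in the action table (regardless of action) and
 * invoke @func on it. If @end is non-NULL it is updated with the index
 * of the last entry visited, so callers can undo partial work on
 * failure.
 */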
static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
				    apei_exec_entry_func_t func,
				    void *data,
				    int *end)
{
	u8 ins;
	int i, rc;
	struct acpi_whea_header *entry;
	struct apei_exec_ins_type *ins_table = ctx->ins_table;

	for (i = 0; i < ctx->entries; i++) {
		entry = ctx->action_table + i;
		ins = entry->instruction;
		if (end)
			*end = i;
		if (ins >= ctx->instructions || !ins_table[ins].run) {
			pr_warn(FW_WARN APEI_PFX
				"Invalid action table, unknown instruction type: %d\n",
				ins);
			return -EINVAL;
		}
		rc = func(ctx, entry, data);
		if (rc)
			return rc;
	}

	return 0;
}

static int pre_map_gar_callback(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry,
				void *data)
{
	u8 ins = entry->instruction;

	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
		return apei_map_generic_address(&entry->register_region);

	return 0;
}

/*
 * Pre-map all GARs in the action table to make it possible to access
 * them in the NMI handler.
 */
int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
{
	int rc, end;

	rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
				      NULL, &end);
	if (rc) {
		struct apei_exec_context ctx_unmap;
		memcpy(&ctx_unmap, ctx, sizeof(*ctx));
		ctx_unmap.entries = end;
		apei_exec_post_unmap_gars(&ctx_unmap);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);

static int post_unmap_gar_callback(struct apei_exec_context *ctx,
				   struct acpi_whea_header *entry,
				   void *data)
{
	u8 ins = entry->instruction;

	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
		apei_unmap_generic_address(&entry->register_region);

	return 0;
}

/* Post-unmap all GARs in the action table. */
int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
{
	return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
					NULL, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);

/*
 * Resource management for GARs in APEI
 */
struct apei_res {
	struct list_head list;
	unsigned long start;
	unsigned long end;
};

/* Collect all resources requested, to avoid conflict */
static struct apei_resources apei_resources_all = {
	.iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
	.ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
};

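/*
 * Insert the range [start, start + size) into @res_list, merging it
 * with any existing ranges that it overlaps or touches so the list
 * remains a set of disjoint ranges.
 */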
static int apei_res_add(struct list_head *res_list,
			unsigned long start, unsigned long size)
{
	struct apei_res *res, *resn, *res_ins = NULL;
	unsigned long end = start + size;

	if (end <= start)
		return 0;
repeat:
	list_for_each_entry_safe(res, resn, res_list, list) {
		if (res->start > end || res->end < start)
			continue;
		else if (end <= res->end && start >= res->start) {
			kfree(res_ins);
			return 0;
		}
		list_del(&res->list);
		res->start = start = min(res->start, start);
		res->end = end = max(res->end, end);
		kfree(res_ins);
		res_ins = res;
		goto repeat;
	}

	if (res_ins)
		list_add(&res_ins->list, res_list);
	else {
		res_ins = kmalloc(sizeof(*res_ins), GFP_KERNEL);
		if (!res_ins)
			return -ENOMEM;
		res_ins->start = start;
		res_ins->end = end;
		list_add(&res_ins->list, res_list);
	}

	return 0;
}

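/*
 * Remove from @res_list1 every range (or part of a range) that is also
 * covered by a range in @res_list2. A range split in the middle by an
 * entry of @res_list2 is broken into two pieces.
 */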
static int apei_res_sub(struct list_head *res_list1,
			struct list_head *res_list2)
{
	struct apei_res *res1, *resn1, *res2, *res;
	res1 = list_entry(res_list1->next, struct apei_res, list);
	resn1 = list_entry(res1->list.next, struct apei_res, list);
	while (&res1->list != res_list1) {
		list_for_each_entry(res2, res_list2, list) {
			if (res1->start >= res2->end ||
			    res1->end <= res2->start)
				continue;
			else if (res1->end <= res2->end &&
				 res1->start >= res2->start) {
				list_del(&res1->list);
				kfree(res1);
				break;
			} else if (res1->end > res2->end &&
				   res1->start < res2->start) {
				res = kmalloc(sizeof(*res), GFP_KERNEL);
				if (!res)
					return -ENOMEM;
				res->start = res2->end;
				res->end = res1->end;
				res1->end = res2->start;
				list_add(&res->list, &res1->list);
				resn1 = res;
			} else {
				if (res1->start < res2->start)
					res1->end = res2->start;
				else
					res1->start = res2->end;
			}
		}
		res1 = resn1;
		resn1 = list_entry(resn1->list.next, struct apei_res, list);
	}

	return 0;
}

static void apei_res_clean(struct list_head *res_list)
{
	struct apei_res *res, *resn;

	list_for_each_entry_safe(res, resn, res_list, list) {
		list_del(&res->list);
		kfree(res);
	}
}

void apei_resources_fini(struct apei_resources *resources)
{
	apei_res_clean(&resources->iomem);
	apei_res_clean(&resources->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_fini);

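/* Add all ranges in @resources2 to @resources1. */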
static int apei_resources_merge(struct apei_resources *resources1,
				struct apei_resources *resources2)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources2->iomem, list) {
		rc = apei_res_add(&resources1->iomem, res->start,
				  res->end - res->start);
		if (rc)
			return rc;
	}
	list_for_each_entry(res, &resources2->ioport, list) {
		rc = apei_res_add(&resources1->ioport, res->start,
				  res->end - res->start);
		if (rc)
			return rc;
	}

	return 0;
}

int apei_resources_add(struct apei_resources *resources,
		       unsigned long start, unsigned long size,
		       bool iomem)
{
	if (iomem)
		return apei_res_add(&resources->iomem, start, size);
	else
		return apei_res_add(&resources->ioport, start, size);
}
EXPORT_SYMBOL_GPL(apei_resources_add);

/*
 * EINJ has two groups of GARs (EINJ table entry and trigger table
 * entry), so the common resources are subtracted from the trigger
 * table resources before they are requested a second time.
 */
int apei_resources_sub(struct apei_resources *resources1,
		       struct apei_resources *resources2)
{
	int rc;

	rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
	if (rc)
		return rc;
	return apei_res_sub(&resources1->ioport, &resources2->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_sub);

static int apei_get_res_callback(__u64 start, __u64 size, void *data)
{
	struct apei_resources *resources = data;
	return apei_res_add(&resources->iomem, start, size);
}

static int apei_get_nvs_resources(struct apei_resources *resources)
{
	return acpi_nvs_for_each_region(apei_get_res_callback, resources);
}

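/*
 * Optional architecture hook that reports address ranges (for example
 * ranges already reserved by the platform/firmware) which are excluded
 * from the APEI resource requests below to avoid false conflicts.
 */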
int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
					 void *data), void *data);
static int apei_get_arch_resources(struct apei_resources *resources)
{
	return arch_apei_filter_addr(apei_get_res_callback, resources);
}

/*
 * The I/O memory/port resource management mechanism is used to check
 * whether the memory/port areas used by GARs conflict with normal
 * memory or the I/O memory/ports of devices.
 */
int apei_resources_request(struct apei_resources *resources,
			   const char *desc)
{
	struct apei_res *res, *res_bak = NULL;
	struct resource *r;
	struct apei_resources nvs_resources, arch_res;
	int rc;

	rc = apei_resources_sub(resources, &apei_resources_all);
	if (rc)
		return rc;

	/*
	 * Some firmware uses the ACPI NVS region, which has already
	 * been marked as busy, so exclude it from the APEI resources
	 * to avoid a false conflict.
	 */
	apei_resources_init(&nvs_resources);
	rc = apei_get_nvs_resources(&nvs_resources);
	if (rc)
		goto nvs_res_fini;
	rc = apei_resources_sub(resources, &nvs_resources);
	if (rc)
		goto nvs_res_fini;

	if (arch_apei_filter_addr) {
		apei_resources_init(&arch_res);
		rc = apei_get_arch_resources(&arch_res);
		if (rc)
			goto arch_res_fini;
		rc = apei_resources_sub(resources, &arch_res);
		if (rc)
			goto arch_res_fini;
	}

	rc = -EINVAL;
	list_for_each_entry(res, &resources->iomem, list) {
		r = request_mem_region(res->start, res->end - res->start,
				       desc);
		if (!r) {
			pr_err(APEI_PFX
			       "Can not request [mem %#010llx-%#010llx] for %s registers\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end - 1, desc);
			res_bak = res;
			goto err_unmap_iomem;
		}
	}

	list_for_each_entry(res, &resources->ioport, list) {
		r = request_region(res->start, res->end - res->start, desc);
		if (!r) {
			pr_err(APEI_PFX
			       "Can not request [io %#06llx-%#06llx] for %s registers\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end - 1, desc);
			res_bak = res;
			goto err_unmap_ioport;
		}
	}

	rc = apei_resources_merge(&apei_resources_all, resources);
	if (rc) {
		pr_err(APEI_PFX "Fail to merge resources!\n");
		goto err_unmap_ioport;
	}

	goto arch_res_fini;

err_unmap_ioport:
	list_for_each_entry(res, &resources->ioport, list) {
		if (res == res_bak)
			break;
		release_region(res->start, res->end - res->start);
	}
	res_bak = NULL;
err_unmap_iomem:
	list_for_each_entry(res, &resources->iomem, list) {
		if (res == res_bak)
			break;
		release_mem_region(res->start, res->end - res->start);
	}
arch_res_fini:
	if (arch_apei_filter_addr)
		apei_resources_fini(&arch_res);
nvs_res_fini:
	apei_resources_fini(&nvs_resources);
	return rc;
}
EXPORT_SYMBOL_GPL(apei_resources_request);

void apei_resources_release(struct apei_resources *resources)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources->iomem, list)
		release_mem_region(res->start, res->end - res->start);
	list_for_each_entry(res, &resources->ioport, list)
		release_region(res->start, res->end - res->start);

	rc = apei_resources_sub(&apei_resources_all, resources);
	if (rc)
		pr_err(APEI_PFX "Fail to sub resources!\n");
}
EXPORT_SYMBOL_GPL(apei_resources_release);

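/*
 * Sanity-check a Generic Address Structure: reject a zero address, an
 * out-of-range access size code, a bit field that does not fit within
 * the access width, and address spaces other than system memory and
 * system I/O. On success, the physical address and the access width
 * in bits are returned via @paddr and @access_bit_width.
 */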
static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
			  u32 *access_bit_width)
{
	u32 bit_width, bit_offset, access_size_code, space_id;

	bit_width = reg->bit_width;
	bit_offset = reg->bit_offset;
	access_size_code = reg->access_width;
	space_id = reg->space_id;
	*paddr = get_unaligned(&reg->address);
	if (!*paddr) {
		pr_warn(FW_BUG APEI_PFX
			"Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
			*paddr, bit_width, bit_offset, access_size_code,
			space_id);
		return -EINVAL;
	}

	if (access_size_code < 1 || access_size_code > 4) {
		pr_warn(FW_BUG APEI_PFX
			"Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n",
			*paddr, bit_width, bit_offset, access_size_code,
			space_id);
		return -EINVAL;
	}
	*access_bit_width = 1UL << (access_size_code + 2);

	/* Fixup common BIOS bug */
	if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
	    *access_bit_width < 32)
		*access_bit_width = 32;
	else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 &&
		 *access_bit_width < 64)
		*access_bit_width = 64;

	if ((bit_width + bit_offset) > *access_bit_width) {
		pr_warn(FW_BUG APEI_PFX
			"Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
			*paddr, bit_width, bit_offset, access_size_code,
			space_id);
		return -EINVAL;
	}

	if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
	    space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
		pr_warn(FW_BUG APEI_PFX
			"Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n",
			*paddr, bit_width, bit_offset, access_size_code,
			space_id);
		return -EINVAL;
	}

	return 0;
}

int apei_map_generic_address(struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;

	/* IO space doesn't need mapping */
	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		return 0;

	if (!acpi_os_map_generic_address(reg))
		return -ENXIO;

	return 0;
}
EXPORT_SYMBOL_GPL(apei_map_generic_address);

/* read GAR in interrupt (including NMI) or process context */
int apei_read(u64 *val, struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;
	acpi_status status;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;

	*val = 0;
	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		status = acpi_os_read_memory((acpi_physical_address) address,
					     val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		status = acpi_os_read_port(address, (u32 *)val,
					   access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_read);

/* write GAR in interrupt (including NMI) or process context */
int apei_write(u64 val, struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;
	acpi_status status;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		status = acpi_os_write_memory((acpi_physical_address) address,
					      val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		status = acpi_os_write_port(address, val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_write);

static int collect_res_callback(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry,
				void *data)
{
	struct apei_resources *resources = data;
	struct acpi_generic_address *reg = &entry->register_region;
	u8 ins = entry->instruction;
	u32 access_bit_width;
	u64 paddr;
	int rc;

	if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
		return 0;

	rc = apei_check_gar(reg, &paddr, &access_bit_width);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return apei_res_add(&resources->iomem, paddr,
				    access_bit_width / 8);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		return apei_res_add(&resources->ioport, paddr,
				    access_bit_width / 8);
	default:
		return -EINVAL;
	}
}

/*
 * The same register may be used by multiple instructions in the GARs,
 * so the resources are collected first and then requested.
 */
int apei_exec_collect_resources(struct apei_exec_context *ctx,
				struct apei_resources *resources)
{
	return apei_exec_for_each_entry(ctx, collect_res_callback,
					resources, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_collect_resources);
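
/*
 * An illustrative sketch (the names below are placeholders, not actual
 * in-tree code) of how a table driver combines the resource helpers:
 *
 *	apei_resources_init(&res);
 *	rc = apei_exec_collect_resources(&ctx, &res);
 *	if (!rc)
 *		rc = apei_resources_request(&res, "APEI FOO");
 *	...
 *	apei_resources_release(&res);
 *	apei_resources_fini(&res);
 *
 * See erst.c and einj.c for the real sequences.
 */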

struct dentry *apei_get_debugfs_dir(void)
{
	static struct dentry *dapei;

	if (!dapei)
		dapei = debugfs_create_dir("apei", NULL);

	return dapei;
}
EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);

int __weak arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr,
				  void *data)
{
	return 1;
}
EXPORT_SYMBOL_GPL(arch_apei_enable_cmcff);

void __weak arch_apei_report_mem_error(int sev,
				       struct cper_sec_mem_err *mem_err)
{
}
EXPORT_SYMBOL_GPL(arch_apei_report_mem_error);

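/*
 * Evaluate _OSC under \_SB with the WHEA UUID, advertising OSPM
 * support for APEI (bit 0 of the support DWORD) and requesting no
 * control bits, so that the firmware knows the OS can handle APEI.
 */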
int apei_osc_setup(void)
{
	static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
	acpi_handle handle;
	u32 capbuf[3];
	struct acpi_osc_context context = {
		.uuid_str	= whea_uuid_str,
		.rev		= 1,
		.cap.length	= sizeof(capbuf),
		.cap.pointer	= capbuf,
	};

	capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_DWORD] = 1;
	capbuf[OSC_CONTROL_DWORD] = 0;

	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
	    || ACPI_FAILURE(acpi_run_osc(handle, &context)))
		return -EIO;
	else {
		kfree(context.ret.pointer);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(apei_osc_setup);