/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"

#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif
static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
	"mca",
	"vcn",
	"jpeg",
};

const char *ras_mca_block_string[] = {
	"mca_mp0",
	"mca_mp1",
	"mca_mpio",
	"mca_iohc",
};

struct amdgpu_ras_block_list {
	/* ras block link */
	struct list_head node;

	struct amdgpu_ras_block_object *ras_obj;
};
const char *get_ras_block_str(struct ras_common_if *ras_block)
{
	if (!ras_block)
		return "NULL";

	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
		return "OUT OF RANGE";

	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
		return ras_mca_block_string[ras_block->sub_block_index];

	return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")

#define ras_err_str(i) (ras_error_string[ffs(i)])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define	RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)

enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
				uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
				uint64_t addr);
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
	int num_gpu;
};
static struct mce_notifier_adev_list mce_adev_list;
#endif

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
	if (adev && amdgpu_ras_get_context(adev))
		amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
	if (adev && amdgpu_ras_get_context(adev))
		return amdgpu_ras_get_context(adev)->error_query_ready;

	return false;
}

static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
	struct ras_err_data err_data = {0, 0, 0, NULL};
	struct eeprom_table_record err_rec;

	if ((address >= adev->gmc.mc_vram_size) ||
	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
		dev_warn(adev->dev,
			 "RAS WARN: input address 0x%llx is invalid.\n",
			 address);
		return -EINVAL;
	}

	if (amdgpu_ras_check_bad_page(adev, address)) {
		dev_warn(adev->dev,
			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
			 address);
		return 0;
	}

	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
	err_data.err_addr = &err_rec;
	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					 err_data.err_addr_cnt);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
	dev_warn(adev->dev, "Clear EEPROM:\n");
	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

	return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
	if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
	    obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);
	if (*pos >= s)
		return 0;

	s -= *pos;
	s = min_t(u64, s, size);

	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.write = NULL,
	.llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_string[i]) == 0)
			return 0;
	}
	return -EINVAL;
}
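/* Parse a user command written to the ras_ctrl debugfs file.
 * Accepted ASCII command forms (see the DOC section below for details):
 *   disable <block>
 *   enable  <block> <error>
 *   inject  <block> <error> <sub_block> <address> <value> [<instance_mask>]
 *   retire_page <address>
 * Anything that matches none of these is treated as a raw binary write of
 * struct ras_debug_if.
 */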
static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9] = "ue";
	int op = -1;
	int block_id;
	uint32_t sub_block;
	u64 address, value;
	/* default value is 0 if the mask is not set by user */
	u32 instance_mask = 0;

	if (*pos)
		return -EINVAL;
	*pos = size;

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (strstr(str, "retire_page") != NULL)
		op = 3;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but commands are not matched. */
		return -EINVAL;

	if (op != -1) {
		if (op == 3) {
			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
			    sscanf(str, "%*s %llu", &address) != 1)
				return -EINVAL;

			data->op = op;
			data->inject.address = address;

			return 0;
		}

		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		/* only ue and ce errors are supported */
		if (!memcmp("ue", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		else if (!memcmp("ce", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		else
			return -EINVAL;

		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
				   &sub_block, &address, &value) != 3 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu",
				   &sub_block, &address, &value) != 3)
				return -EINVAL;
			data->head.sub_block_index = sub_block;
			data->inject.address = address;
			data->inject.value = value;
			data->inject.instance_mask = instance_mask;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}

static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
				struct ras_debug_if *data)
{
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
	uint32_t mask, inst_mask = data->inject.instance_mask;

	/* no need to set instance mask if there is only one instance */
	if (num_xcc <= 1 && inst_mask) {
		data->inject.instance_mask = 0;
		dev_dbg(adev->dev,
			"RAS inject mask(0x%x) isn't supported and force it to 0.\n",
			inst_mask);

		return;
	}

	switch (data->head.block) {
	case AMDGPU_RAS_BLOCK__GFX:
		mask = GENMASK(num_xcc - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__SDMA:
		mask = GENMASK(adev->sdma.num_instances - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__VCN:
	case AMDGPU_RAS_BLOCK__JPEG:
		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
		break;
	default:
		mask = inst_mask;
		break;
	}

	/* remove invalid bits in instance mask */
	data->inject.instance_mask &= mask;
	if (inst_mask != data->inject.instance_mask)
		dev_dbg(adev->dev,
			"Adjust RAS inject mask 0x%x to 0x%x\n",
			inst_mask, data->inject.instance_mask);
}
/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members: block, type, sub_block_index, name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, e.g., GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has three more members than head: address, value and mask.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
 *
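 * A minimal user-space sketch (assuming struct ras_debug_if and the enum
 * values have been copied verbatim from amdgpu_ras.h; the card index 0 is
 * just an example):
 *
 * .. code-block:: c
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static void ras_enable_umc_ue(void)
 *	{
 *		struct ras_debug_if data = { 0 };
 *		int fd;
 *
 *		data.op = 1;	// 1 == enable RAS on the block
 *		data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *		data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *
 *		fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *		if (fd < 0)
 *			return;
 *		write(fd, &data, sizeof(data));	// raw struct write path
 *		close(fd);
 *	}
 *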
 * From shell
 *
 * .. code-block:: bash
 *
 *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "inject  <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where N is the card which you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *	see ras_block_string[] for details
 *
 * The error type is one of: ue, ce, where,
 *	ue is multi-uncorrectable
 *	ce is single-correctable
 *
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers, leading 0x is optional.
 * The mask is the instance mask; it is optional and its default value is 0x1.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
 * .. note::
 *	Operations are only allowed on blocks which are supported.
 *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *	to see which blocks support RAS on a particular asic.
 *
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
					     const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev, "RAS WARN: error injection "
				"currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return ret;

	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
		if (!ret)
			return size;
		else
			return ret;
	}

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size &&
		    adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			dev_warn(adev->dev, "RAS WARN: input address "
					"0x%llx is invalid.",
					data.inject.address);
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
				 "already been marked as bad!\n",
				 data.inject.address);
			break;
		}

		amdgpu_ras_instance_mask_check(adev, &data);

		/* data.inject.address is offset instead of absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	return size;
}
/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experienced ECC errors in vram.  This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
					       const char __user *buf,
					       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
		&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (!ret) {
		/* Something was written to EEPROM.
		 */
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return ret;
	}
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};
/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *	ue: 0
 *	ce: 1
 *
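 * For instance, to read the UMC counters on card0 (the card index and
 * block name here are illustrative):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/umc_err_count
 *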
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
	    obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
			  "ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && (--obj->use == 0))
		list_del(&obj->node);
	if (obj && (obj->use < 0))
		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
			return NULL;

		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
	} else
		obj = &con->objs[head->block];

	/* already exists. return obj? */
	if (alive_obj(obj))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
				return NULL;

			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
		} else
			obj = &con->objs[head->block];

		if (alive_obj(obj))
			return obj;
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj))
				return obj;
		}
	}

	return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
					 struct ras_common_if *head)
{
	return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If the hardware does not support ras, then do not create the obj.
	 * But if the hardware does support ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs to do
	 * the corresponding initialization.
	 * IPs check con->support to see if they need to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we create obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input *info;
	int ret;

	if (!con)
		return -EINVAL;

	/* Do not enable ras feature if it is not allowed */
	if (enable &&
	    head->block != AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	/* Only enable gfx ras feature from host side */
	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_sriov_vf(adev) &&
	    !amdgpu_ras_intr_triggered()) {
		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		if (!enable) {
			info->disable_features = (struct ta_ras_disable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		} else {
			info->enable_features = (struct ta_ras_enable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		}

		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
				enable ? "enable":"disable",
				get_ras_block_str(head),
				amdgpu_ras_is_poison_mode_supported(adev), ret);
			kfree(info);
			return ret;
		}

		kfree(info);
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);

	return 0;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm in issuing a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing.
			 * But sometimes it requests the driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With an old ras TA, we might fail to enable ras.
			 * Log it and just set up the object.
			 * TODO: this WA needs to be removed in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
				if (!ret)
					dev_info(adev->dev,
						"RAS INFO: %s setup object\n",
						get_ras_block_str(head));
			}
		} else {
			/* setup the object then issue a ras TA disable cmd.*/
			ret = __amdgpu_ras_feature_enable(adev, head, 1);
			if (ret)
				return ret;

			/* gfx block ras disable cmd must be sent to ras-ta */
			if (head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features |= BIT(head->block);

			ret = amdgpu_ras_feature_enable(adev, head, 0);

			/* clean gfx block ras features flag */
			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features &= ~BIT(head->block);
		}
	} else
		ret = amdgpu_ras_feature_enable(adev, head, enable);

	return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		/* bypass psp.
		 * aka just release the obj and corresponding flags
		 */
		if (bypass) {
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		}
	}

	return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int i;
	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = i,
			.type = default_ras_type,
			.sub_block_index = 0,
		};

		if (i == AMDGPU_RAS_BLOCK__MCA)
			continue;

		if (bypass) {
			/*
			 * bypass psp. the vbios enables ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = AMDGPU_RAS_BLOCK__MCA,
			.type = default_ras_type,
			.sub_block_index = i,
		};

		if (bypass) {
			/*
			 * bypass psp. the vbios enables ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
		enum amdgpu_ras_block block)
{
	if (!block_obj)
		return -EINVAL;

	if (block_obj->ras_comm.block == block)
		return 0;

	return -EINVAL;
}

static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
					enum amdgpu_ras_block block, uint32_t sub_block_index)
{
	struct amdgpu_ras_block_list *node, *tmp;
	struct amdgpu_ras_block_object *obj;

	if (block >= AMDGPU_RAS_BLOCK__LAST)
		return NULL;

	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
		if (!node->ras_obj) {
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
			continue;
		}

		obj = node->ras_obj;
		if (obj->ras_block_match) {
			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
				return obj;
		} else {
			if (amdgpu_ras_block_match_default(obj, block) == 0)
				return obj;
		}
	}

	return NULL;
}

static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/*
	 * choose the right query method according to
	 * whether the smu supports querying error information
	 */
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
	if (ret == -EOPNOTSUPP) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
	} else if (!ret) {
		if (adev->umc.ras &&
			adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

		if (adev->umc.ras &&
			adev->umc.ras->ecc_info_query_ras_error_address)
			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
	}
}

/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
				  struct ras_query_if *info)
{
	struct amdgpu_ras_block_object *block_obj = NULL;
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data = {0, 0, 0, NULL};

	if (!obj)
		return -EINVAL;

	if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
		amdgpu_ras_get_ecc_info(adev, &err_data);
	} else {
		block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
		if (!block_obj || !block_obj->hw_ops) {
			dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
				     get_ras_block_str(&info->head));
			return -EINVAL;
		}

		if (block_obj->hw_ops->query_ras_error_count)
			block_obj->hw_ops->query_ras_error_count(adev, &err_data);

		if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
		    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
		    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
			if (block_obj->hw_ops->query_ras_error_status)
				block_obj->hw_ops->query_ras_error_status(adev);
		}
	}

	obj->err_data.ue_count += err_data.ue_count;
	obj->err_data.ce_count += err_data.ce_count;

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;

	if (err_data.ce_count) {
		if (!adev->aid_mask &&
		    adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id &&
		    adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
					"%ld correctable hardware errors "
					"detected in %s block, no user "
					"action is needed.\n",
					adev->smuio.funcs->get_socket_id(adev),
					adev->smuio.funcs->get_die_id(adev),
					obj->err_data.ce_count,
					get_ras_block_str(&info->head));
		} else {
			dev_info(adev->dev, "%ld correctable hardware errors "
					"detected in %s block, no user "
					"action is needed.\n",
					obj->err_data.ce_count,
					get_ras_block_str(&info->head));
		}
	}
	if (err_data.ue_count) {
		if (!adev->aid_mask &&
		    adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id &&
		    adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
					"%ld uncorrectable hardware errors "
					"detected in %s block\n",
					adev->smuio.funcs->get_socket_id(adev),
					adev->smuio.funcs->get_die_id(adev),
					obj->err_data.ue_count,
					get_ras_block_str(&info->head));
		} else {
			dev_info(adev->dev, "%ld uncorrectable hardware errors "
					"detected in %s block\n",
					obj->err_data.ue_count,
					get_ras_block_str(&info->head));
		}
	}

	return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

	if (!amdgpu_ras_is_supported(adev, block))
		return -EINVAL;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     ras_block_str(block));
		return -EINVAL;
	}

	if (block_obj->hw_ops->reset_ras_error_count)
		block_obj->hw_ops->reset_ras_error_count(adev);

	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
		if (block_obj->hw_ops->reset_ras_error_status)
			block_obj->hw_ops->reset_ras_error_status(adev);
	}

	return 0;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = -EINVAL;
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
							info->head.block,
							info->head.sub_block_index);

	/* inject on guest isn't allowed, return success directly */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!obj)
		return -EINVAL;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
		return -EINVAL;
	}

	/* Calculate XGMI relative offset */
	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
		block_info.address =
			amdgpu_xgmi_get_relative_phy_addr(adev,
							  block_info.address);
	}

	if (block_obj->hw_ops->ras_error_inject) {
		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
		else /* Special ras_error_inject is defined (e.g: xgmi) */
			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
						info->instance_mask);
	} else {
		/* default path */
		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
	}

	if (ret)
		dev_err(adev->dev, "ras inject %s failed %d\n",
			get_ras_block_str(&info->head), ret);

	return ret;
}

/**
 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
 * @query_info: pointer to ras_query_if
 *
 * Return 0 on query success or if there is nothing to do; otherwise return
 * an error on failure.
 */
static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
					       unsigned long *ce_count,
					       unsigned long *ue_count,
					       struct ras_query_if *query_info)
{
	int ret;

	if (!query_info)
		/* do nothing if query_info is not specified */
		return 0;

	ret = amdgpu_ras_query_error_status(adev, query_info);
	if (ret)
		return ret;

	*ce_count += query_info->ce_count;
	*ue_count += query_info->ue_count;

	/* some hardware/IP supports read to clear,
	 * so no need to explicitly reset the err status after the query call */
	if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
	    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
			dev_warn(adev->dev,
				 "Failed to reset error counter and error status\n");
	}

	return 0;
}

/**
 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable
 * errors.
 * @query_info: pointer to ras_query_if if the query request is only for a
 * specific ip block; if info is NULL, then the query request is for
 * all the ip blocks that support query ras error counters/status
 *
 * If @ce_count or @ue_count is set, count and return the corresponding
 * error counts in those integer pointers. Return 0 if the device
 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
 */
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
				 unsigned long *ce_count,
				 unsigned long *ue_count,
				 struct ras_query_if *query_info)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	unsigned long ce, ue;
	int ret;

	if (!adev->ras_enabled || !con)
		return -EOPNOTSUPP;

	/* Don't count since no reporting.
	 */
	if (!ce_count && !ue_count)
		return 0;

	ce = 0;
	ue = 0;
	if (!query_info) {
		/* query all the ip blocks that support ras query interface */
		list_for_each_entry(obj, &con->head, node) {
			struct ras_query_if info = {
				.head = obj->head,
			};

			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
		}
	} else {
		/* query specific ip block */
		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
	}

	if (ret)
		return ret;

	if (ce_count)
		*ce_count = ce;

	if (ue_count)
		*ue_count = ue;

	return 0;
}
/* query/inject/cure end */


/* sysfs begin */

static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
	switch (flags) {
	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
		return "R";
	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
		return "P";
	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
	default:
		return "F";
	}
}

/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page is reserved and not available for use.
 *
 * P: pending for reserve, this gpu page is marked as bad and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve. this gpu page can't be reserved for some reason.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *	0x00000001 : 0x00001000 : R
 *	0x00000002 : 0x00001000 : P
 *
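 * A quick way to dump the list from the shell (the card index here is
 * illustrative):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 *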
 */

static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t ppos, size_t count)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, badpages_attr);
	struct amdgpu_device *adev = con->adev;
	const unsigned int element_size =
		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
	unsigned int end = div64_ul(ppos + count - 1, element_size);
	ssize_t s = 0;
	struct ras_badpage *bps = NULL;
	unsigned int bps_count = 0;

	memset(buf, 0, count);

	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
		return 0;

	for (; start < end && start < bps_count; start++)
		s += scnprintf(&buf[s], element_size + 1,
				"0x%08x : 0x%08x : %1s\n",
				bps[start].bp,
				bps[start].size,
				amdgpu_ras_badpage_flags_str(bps[start].flags));

	kfree(bps);

	return s;
}

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, features_attr);

	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}

static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
				&con->badpages_attr.attr,
				RAS_FS_NAME);
}

static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct attribute_group group = {
		.name = RAS_FS_NAME,
		.attrs = attrs,
	};

	if (adev->dev->kobj.sd)
		sysfs_remove_group(&adev->dev->kobj, &group);

	return 0;
}

int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || obj->attr_inuse)
		return -EINVAL;

	get_obj(obj);

	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
		"%s_err_count", head->name);

	obj->sysfs_attr = (struct device_attribute){
		.attr = {
			.name = obj->fs_data.sysfs_name,
			.mode = S_IRUGO,
		},
		.show = amdgpu_ras_sysfs_read,
	};
	sysfs_attr_init(&obj->sysfs_attr.attr);

	if (sysfs_add_file_to_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME)) {
		put_obj(obj);
		return -EINVAL;
	}

	obj->attr_inuse = 1;

	return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->attr_inuse)
		return -EINVAL;

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME);
	obj->attr_inuse = 0;
	put_obj(obj);

	return 0;
}

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_sysfs_remove(adev, &obj->head);
	}

	if (amdgpu_bad_page_threshold != 0)
		amdgpu_ras_sysfs_remove_bad_page_node(adev);

	amdgpu_ras_sysfs_remove_feature_node(adev);

	return 0;
}
/* sysfs end */

/**
 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
 *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover.  In the event of an unrecoverable error, however,
 * the driver provides an interface to reboot the system automatically.
 *
 * The following file in debugfs provides that interface:
 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo true > .../ras/auto_reboot
 *
 */
/* debugfs begin */
static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
	struct drm_minor  *minor = adev_to_drm(adev)->primary;
	struct dentry     *dir;

	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_ctrl_ops);
	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_ops);
	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
			   &con->bad_page_cnt_threshold);
	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_size_ops);
	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
						       S_IRUGO, dir, adev,
						       &amdgpu_ras_debugfs_eeprom_table_ops);
	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);

	/*
	 * After one uncorrectable error happens, GPU recovery will usually
	 * be scheduled. But due to a known problem with GPU recovery failing
	 * to bring the GPU back, the interface below provides a direct way
	 * for the user to have the system rebooted automatically when an
	 * ERREVENT_ATHUB_INTERRUPT is generated. In that case the normal
	 * GPU recovery routine will never be called.
	 */
	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);

	/*
	 * The user can set this so that the hardware's error count registers
	 * of RAS IPs are not cleaned up during ras recovery.
	 */
	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
			    &con->disable_ras_err_cnt_harvest);
	return dir;
}

static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
				      struct ras_fs_if *head,
				      struct dentry *dir)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

	if (!obj || !dir)
		return;

	get_obj(obj);

	memcpy(obj->fs_data.debugfs_name,
			head->debugfs_name,
			sizeof(obj->fs_data.debugfs_name));

	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
			    obj, &amdgpu_ras_debugfs_ops);
}

void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct dentry *dir;
	struct ras_manager *obj;
	struct ras_fs_if fs_info;

	/*
	 * it won't be called in resume path, no need to check
	 * suspend and gpu reset status
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
		return;

	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);

	list_for_each_entry(obj, &con->head, node) {
		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
			(obj->attr_inuse == 1)) {
			sprintf(fs_info.debugfs_name, "%s_err_inject",
					get_ras_block_str(&obj->head));
			fs_info.head = obj->head;
			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
		}
	}
}

/* debugfs end */

/* ras fs */
static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
		amdgpu_ras_sysfs_badpages_read, NULL, 0);
static DEVICE_ATTR(features, S_IRUGO,
		amdgpu_ras_sysfs_features_read, NULL);
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute_group group = {
		.name = RAS_FS_NAME,
	};
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct bin_attribute *bin_attrs[] = {
		NULL,
		NULL,
	};
	int r;

	/* add features entry */
	con->features_attr = dev_attr_features;
	group.attrs = attrs;
	sysfs_attr_init(attrs[0]);

	if (amdgpu_bad_page_threshold != 0) {
		/* add bad_page_features entry */
		bin_attr_gpu_vram_bad_pages.private = NULL;
		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
		bin_attrs[0] = &con->badpages_attr;
		group.bin_attrs = bin_attrs;
		sysfs_bin_attr_init(bin_attrs[0]);
	}

	r = sysfs_create_group(&adev->dev->kobj, &group);
	if (r)
		dev_err(adev->dev, "Failed to create RAS sysfs group!");

	return 0;
}

static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *con_obj, *ip_obj, *tmp;

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
			if (ip_obj)
				put_obj(ip_obj);
		}
	}

	amdgpu_ras_sysfs_remove_all(adev);
	return 0;
}
/* ras fs end */

/* ih begin */

/* For the hardware that cannot enable the bif ring for both ras_controller_irq
 * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
 * register to check whether the interrupt is triggered or not, and properly
 * ack the interrupt if it is there
 */
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
{
	/* Fatal error events are handled on host side */
	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->nbio.ras &&
	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);

	if (adev->nbio.ras &&
	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
}
1661 
amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager * obj,struct amdgpu_iv_entry * entry)1662 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
1663 				struct amdgpu_iv_entry *entry)
1664 {
1665 	bool poison_stat = false;
1666 	struct amdgpu_device *adev = obj->adev;
1667 	struct amdgpu_ras_block_object *block_obj =
1668 		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
1669 
1670 	if (!block_obj)
1671 		return;
1672 
1673 	/* both query_poison_status and handle_poison_consumption are optional,
1674 	 * but at least one of them should be implemented if we need poison
1675 	 * consumption handler
1676 	 */
1677 	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
1678 		poison_stat = block_obj->hw_ops->query_poison_status(adev);
1679 		if (!poison_stat) {
1680 			/* Not poison consumption interrupt, no need to handle it */
1681 			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
1682 					block_obj->ras_comm.name);
1683 
1684 			return;
1685 		}
1686 	}
1687 
1688 	amdgpu_umc_poison_handler(adev, false);
1689 
1690 	if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
1691 		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
1692 
1693 	/* gpu reset is fallback for failed and default cases */
1694 	if (poison_stat) {
1695 		dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
1696 				block_obj->ras_comm.name);
1697 		amdgpu_ras_reset_gpu(adev);
1698 	} else {
1699 		amdgpu_gfx_poison_consumption_handler(adev, entry);
1700 	}
1701 }
1702 
1703 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
1704 				struct amdgpu_iv_entry *entry)
1705 {
1706 	dev_info(obj->adev->dev,
1707 		"Poison is created, no user action is needed.\n");
1708 }
1709 
1710 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
1711 				struct amdgpu_iv_entry *entry)
1712 {
1713 	struct ras_ih_data *data = &obj->ih_data;
1714 	struct ras_err_data err_data = {0, 0, 0, NULL};
1715 	int ret;
1716 
1717 	if (!data->cb)
1718 		return;
1719 
1720 	/* Let the IP handle its data; we may need to get the output
1721 	 * from the callback to update the error type/count, etc.
1722 	 */
1723 	ret = data->cb(obj->adev, &err_data, entry);
1724 	/* A UE will trigger an interrupt, and in that case
1725 	 * we need to do a reset to recover the whole system.
1726 	 * But leave that recovery to the IP; here we just dispatch
1727 	 * the error.
1728 	 */
1729 	if (ret == AMDGPU_RAS_SUCCESS) {
1730 		/* these counts could be left as 0 if
1731 		 * some blocks do not count the number of errors
1732 		 */
1733 		obj->err_data.ue_count += err_data.ue_count;
1734 		obj->err_data.ce_count += err_data.ce_count;
1735 	}
1736 }
1737 
1738 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
1739 {
1740 	struct ras_ih_data *data = &obj->ih_data;
1741 	struct amdgpu_iv_entry entry;
1742 
1743 	while (data->rptr != data->wptr) {
1744 		rmb();
1745 		memcpy(&entry, &data->ring[data->rptr],
1746 				data->element_size);
1747 
1748 		wmb();
1749 		data->rptr = (data->aligned_element_size +
1750 				data->rptr) % data->ring_size;
1751 
1752 		if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
1753 			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1754 				amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
1755 			else
1756 				amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
1757 		} else {
1758 			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1759 				amdgpu_ras_interrupt_umc_handler(obj, &entry);
1760 			else
1761 				dev_warn(obj->adev->dev,
1762 					"No RAS interrupt handler for non-UMC block with poison disabled.\n");
1763 		}
1764 	}
1765 }
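
/*
 * Illustration of the ring index arithmetic above (sizes hypothetical):
 * with aligned_element_size = 56 the ring holds 64 entries in
 * ring_size = 64 * 56 = 3584 bytes; a consumer at rptr = 3528 advances to
 * (56 + 3528) % 3584 = 0, i.e. it wraps back to the start of the ring.
 */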
1766 
1767 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
1768 {
1769 	struct ras_ih_data *data =
1770 		container_of(work, struct ras_ih_data, ih_work);
1771 	struct ras_manager *obj =
1772 		container_of(data, struct ras_manager, ih_data);
1773 
1774 	amdgpu_ras_interrupt_handler(obj);
1775 }
1776 
1777 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
1778 		struct ras_dispatch_if *info)
1779 {
1780 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1781 	struct ras_ih_data *data;
1782 
1783 	if (!obj)
1784 		return -EINVAL;
1785 
1786 	data = &obj->ih_data;
1787 	if (data->inuse == 0)
1788 		return 0;
1789 	/* Might overflow: the producer does not check for a full ring */
1790 	memcpy(&data->ring[data->wptr], info->entry,
1791 			data->element_size);
1792 
1793 	wmb();
1794 	data->wptr = (data->aligned_element_size +
1795 			data->wptr) % data->ring_size;
1796 
1797 	schedule_work(&data->ih_work);
1798 
1799 	return 0;
1800 }
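
/*
 * Usage sketch (hypothetical caller, for illustration only): an IP's
 * interrupt handler typically wraps the iv entry in a ras_dispatch_if and
 * lets this function queue it for the per-block worker, e.g.:
 *
 *	struct ras_dispatch_if ih_info = {
 *		.head = *ras_if,
 *		.entry = entry,
 *	};
 *	amdgpu_ras_interrupt_dispatch(adev, &ih_info);
 */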
1801 
1802 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
1803 		struct ras_common_if *head)
1804 {
1805 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1806 	struct ras_ih_data *data;
1807 
1808 	if (!obj)
1809 		return -EINVAL;
1810 
1811 	data = &obj->ih_data;
1812 	if (data->inuse == 0)
1813 		return 0;
1814 
1815 	cancel_work_sync(&data->ih_work);
1816 
1817 	kfree(data->ring);
1818 	memset(data, 0, sizeof(*data));
1819 	put_obj(obj);
1820 
1821 	return 0;
1822 }
1823 
1824 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
1825 		struct ras_common_if *head)
1826 {
1827 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1828 	struct ras_ih_data *data;
1829 	struct amdgpu_ras_block_object *ras_obj;
1830 
1831 	if (!obj) {
1832 		/* in case we register the IH before enabling the ras feature */
1833 		obj = amdgpu_ras_create_obj(adev, head);
1834 		if (!obj)
1835 			return -EINVAL;
1836 	} else
1837 		get_obj(obj);
1838 
1839 	ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
1840 
1841 	data = &obj->ih_data;
1842 	/* add the callback, etc. */
1843 	*data = (struct ras_ih_data) {
1844 		.inuse = 0,
1845 		.cb = ras_obj->ras_cb,
1846 		.element_size = sizeof(struct amdgpu_iv_entry),
1847 		.rptr = 0,
1848 		.wptr = 0,
1849 	};
1850 
1851 	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
1852 
1853 	data->aligned_element_size = ALIGN(data->element_size, 8);
1854 	/* the ring can store 64 iv entries. */
1855 	data->ring_size = 64 * data->aligned_element_size;
1856 	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
1857 	if (!data->ring) {
1858 		put_obj(obj);
1859 		return -ENOMEM;
1860 	}
1861 
1862 	/* IH is ready */
1863 	data->inuse = 1;
1864 
1865 	return 0;
1866 }
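
/*
 * Sizing sketch for the ring allocated above (the concrete size of
 * struct amdgpu_iv_entry is kernel/ASIC dependent; 52 bytes is purely
 * illustrative): element_size = 52 rounds up to
 * aligned_element_size = ALIGN(52, 8) = 56, giving
 * ring_size = 64 * 56 = 3584 bytes for 64 in-flight iv entries.
 */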
1867 
1868 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
1869 {
1870 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1871 	struct ras_manager *obj, *tmp;
1872 
1873 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
1874 		amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
1875 	}
1876 
1877 	return 0;
1878 }
1879 /* ih end */
1880 
1881 /* traverse all IPs except NBIO to query error counters */
1882 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
1883 {
1884 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1885 	struct ras_manager *obj;
1886 
1887 	if (!adev->ras_enabled || !con)
1888 		return;
1889 
1890 	list_for_each_entry(obj, &con->head, node) {
1891 		struct ras_query_if info = {
1892 			.head = obj->head,
1893 		};
1894 
1895 		/*
1896 		 * The PCIE_BIF IP has a separate ISR for the ras controller
1897 		 * interrupt; the block-specific ras counter query is
1898 		 * done in that ISR. So skip such blocks in the common
1899 		 * sync flood interrupt ISR path.
1900 		 */
1901 		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
1902 			continue;
1903 
1904 		/*
1905 		 * This is a workaround for aldebaran: skip sending the msg to
1906 		 * the smu to get the ecc_info table, because the smu currently
1907 		 * fails to handle that request.
1908 		 * It should be removed once the smu fixes ecc_info table handling.
1909 		 */
1910 		if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
1911 			(adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))
1912 			continue;
1913 
1914 		amdgpu_ras_query_error_status(adev, &info);
1915 
1916 		if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
1917 		    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
1918 		    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
1919 			if (amdgpu_ras_reset_error_status(adev, info.head.block))
1920 				dev_warn(adev->dev, "Failed to reset error counter and error status");
1921 		}
1922 	}
1923 }
1924 
1925 /* Parse RdRspStatus and WrRspStatus */
1926 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
1927 					  struct ras_query_if *info)
1928 {
1929 	struct amdgpu_ras_block_object *block_obj;
1930 	/*
1931 	 * Only two blocks need to query the read/write
1932 	 * RspStatus at the current state
1933 	 */
1934 	if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
1935 		(info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
1936 		return;
1937 
1938 	block_obj = amdgpu_ras_get_ras_block(adev,
1939 					info->head.block,
1940 					info->head.sub_block_index);
1941 
1942 	if (!block_obj || !block_obj->hw_ops) {
1943 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1944 			     get_ras_block_str(&info->head));
1945 		return;
1946 	}
1947 
1948 	if (block_obj->hw_ops->query_ras_error_status)
1949 		block_obj->hw_ops->query_ras_error_status(adev);
1950 
1951 }
1952 
1953 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
1954 {
1955 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1956 	struct ras_manager *obj;
1957 
1958 	if (!adev->ras_enabled || !con)
1959 		return;
1960 
1961 	list_for_each_entry(obj, &con->head, node) {
1962 		struct ras_query_if info = {
1963 			.head = obj->head,
1964 		};
1965 
1966 		amdgpu_ras_error_status_query(adev, &info);
1967 	}
1968 }
1969 
1970 /* recovery begin */
1971 
1972 /* return 0 on success.
1973  * caller needs to free bps.
1974  */
1975 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1976 		struct ras_badpage **bps, unsigned int *count)
1977 {
1978 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1979 	struct ras_err_handler_data *data;
1980 	int i = 0;
1981 	int ret = 0, status;
1982 
1983 	if (!con || !con->eh_data || !bps || !count)
1984 		return -EINVAL;
1985 
1986 	mutex_lock(&con->recovery_lock);
1987 	data = con->eh_data;
1988 	if (!data || data->count == 0) {
1989 		*bps = NULL;
1990 		ret = -EINVAL;
1991 		goto out;
1992 	}
1993 
1994 	*bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
1995 	if (!*bps) {
1996 		ret = -ENOMEM;
1997 		goto out;
1998 	}
1999 
2000 	for (; i < data->count; i++) {
2001 		(*bps)[i] = (struct ras_badpage){
2002 			.bp = data->bps[i].retired_page,
2003 			.size = AMDGPU_GPU_PAGE_SIZE,
2004 			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2005 		};
2006 		status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2007 				data->bps[i].retired_page);
2008 		if (status == -EBUSY)
2009 			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2010 		else if (status == -ENOENT)
2011 			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2012 	}
2013 
2014 	*count = data->count;
2015 out:
2016 	mutex_unlock(&con->recovery_lock);
2017 	return ret;
2018 }
2019 
2020 static void amdgpu_ras_do_recovery(struct work_struct *work)
2021 {
2022 	struct amdgpu_ras *ras =
2023 		container_of(work, struct amdgpu_ras, recovery_work);
2024 	struct amdgpu_device *remote_adev = NULL;
2025 	struct amdgpu_device *adev = ras->adev;
2026 	struct list_head device_list, *device_list_handle =  NULL;
2027 
2028 	if (!ras->disable_ras_err_cnt_harvest) {
2029 		struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2030 
2031 		/* Build list of devices to query RAS related errors */
2032 		if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2033 			device_list_handle = &hive->device_list;
2034 		} else {
2035 			INIT_LIST_HEAD(&device_list);
2036 			list_add_tail(&adev->gmc.xgmi.head, &device_list);
2037 			device_list_handle = &device_list;
2038 		}
2039 
2040 		list_for_each_entry(remote_adev,
2041 				device_list_handle, gmc.xgmi.head) {
2042 			amdgpu_ras_query_err_status(remote_adev);
2043 			amdgpu_ras_log_on_err_counter(remote_adev);
2044 		}
2045 
2046 		amdgpu_put_xgmi_hive(hive);
2047 	}
2048 
2049 	if (amdgpu_device_should_recover_gpu(ras->adev)) {
2050 		struct amdgpu_reset_context reset_context;
2051 		memset(&reset_context, 0, sizeof(reset_context));
2052 
2053 		reset_context.method = AMD_RESET_METHOD_NONE;
2054 		reset_context.reset_req_dev = adev;
2055 
2056 		/* Perform full reset in fatal error mode */
2057 		if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2058 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2059 		else {
2060 			clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2061 
2062 			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2063 				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2064 				reset_context.method = AMD_RESET_METHOD_MODE2;
2065 			}
2066 
2067 			/* Fatal error occurs in poison mode, mode1 reset is used to
2068 			 * recover gpu.
2069 			 */
2070 			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2071 				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2072 				set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2073 
2074 				psp_fatal_error_recovery_quirk(&adev->psp);
2075 			}
2076 		}
2077 
2078 		amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2079 	}
2080 	atomic_set(&ras->in_recovery, 0);
2081 }
2082 
2083 /* alloc/realloc bps array */
2084 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2085 		struct ras_err_handler_data *data, int pages)
2086 {
2087 	unsigned int old_space = data->count + data->space_left;
2088 	unsigned int new_space = old_space + pages;
2089 	unsigned int align_space = ALIGN(new_space, 512);
2090 	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2091 
2092 	if (!bps)
2093 		return -ENOMEM;
2095 
2096 	if (data->bps) {
2097 		memcpy(bps, data->bps,
2098 				data->count * sizeof(*data->bps));
2099 		kfree(data->bps);
2100 	}
2101 
2102 	data->bps = bps;
2103 	data->space_left += align_space - old_space;
2104 	return 0;
2105 }
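
/*
 * Worked example of the growth math above: starting from an empty table
 * (count = 0, space_left = 0), asking for 256 more pages yields
 * new_space = 256, which is rounded up to align_space = ALIGN(256, 512) =
 * 512, so the array is sized for 512 records and space_left becomes 512.
 */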
2106 
2107 /* it deals with vram only. */
2108 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2109 		struct eeprom_table_record *bps, int pages)
2110 {
2111 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2112 	struct ras_err_handler_data *data;
2113 	int ret = 0;
2114 	uint32_t i;
2115 
2116 	if (!con || !con->eh_data || !bps || pages <= 0)
2117 		return 0;
2118 
2119 	mutex_lock(&con->recovery_lock);
2120 	data = con->eh_data;
2121 	if (!data)
2122 		goto out;
2123 
2124 	for (i = 0; i < pages; i++) {
2125 		if (amdgpu_ras_check_bad_page_unlock(con,
2126 			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2127 			continue;
2128 
2129 		if (!data->space_left &&
2130 			amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2131 			ret = -ENOMEM;
2132 			goto out;
2133 		}
2134 
2135 		amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
2136 			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
2137 			AMDGPU_GPU_PAGE_SIZE);
2138 
2139 		memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2140 		data->count++;
2141 		data->space_left--;
2142 	}
2143 out:
2144 	mutex_unlock(&con->recovery_lock);
2145 
2146 	return ret;
2147 }
2148 
2149 /*
2150  * write the error record array to eeprom; the function should be
2151  * protected by recovery_lock
2152  * new_cnt: newly added UE count, excluding reserved bad pages, can be NULL
2153  */
2154 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2155 		unsigned long *new_cnt)
2156 {
2157 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2158 	struct ras_err_handler_data *data;
2159 	struct amdgpu_ras_eeprom_control *control;
2160 	int save_count;
2161 
2162 	if (!con || !con->eh_data) {
2163 		if (new_cnt)
2164 			*new_cnt = 0;
2165 
2166 		return 0;
2167 	}
2168 
2169 	mutex_lock(&con->recovery_lock);
2170 	control = &con->eeprom_control;
2171 	data = con->eh_data;
2172 	save_count = data->count - control->ras_num_recs;
2173 	mutex_unlock(&con->recovery_lock);
2174 
2175 	if (new_cnt)
2176 		*new_cnt = save_count / adev->umc.retire_unit;
2177 
2178 	/* only new entries are saved */
2179 	if (save_count > 0) {
2180 		if (amdgpu_ras_eeprom_append(control,
2181 					     &data->bps[control->ras_num_recs],
2182 					     save_count)) {
2183 			dev_err(adev->dev, "Failed to save EEPROM table data!");
2184 			return -EIO;
2185 		}
2186 
2187 		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2188 	}
2189 
2190 	return 0;
2191 }
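
/*
 * Example of the bookkeeping above (retire_unit is ASIC specific; 2 is a
 * hypothetical value): with ras_num_recs = 10 records already in eeprom
 * and data->count = 16 pages tracked in memory, save_count = 6, records
 * bps[10..15] are appended, and *new_cnt reports 6 / 2 = 3 new units.
 */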
2192 
2193 /*
2194  * read the error record array from eeprom and reserve enough space for
2195  * storing new bad pages
2196  */
2197 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2198 {
2199 	struct amdgpu_ras_eeprom_control *control =
2200 		&adev->psp.ras_context.ras->eeprom_control;
2201 	struct eeprom_table_record *bps;
2202 	int ret;
2203 
2204 	/* no bad page record, skip eeprom access */
2205 	if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2206 		return 0;
2207 
2208 	bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2209 	if (!bps)
2210 		return -ENOMEM;
2211 
2212 	ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2213 	if (ret)
2214 		dev_err(adev->dev, "Failed to load EEPROM table records!");
2215 	else
2216 		ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2217 
2218 	kfree(bps);
2219 	return ret;
2220 }
2221 
2222 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2223 				uint64_t addr)
2224 {
2225 	struct ras_err_handler_data *data = con->eh_data;
2226 	int i;
2227 
2228 	addr >>= AMDGPU_GPU_PAGE_SHIFT;
2229 	for (i = 0; i < data->count; i++)
2230 		if (addr == data->bps[i].retired_page)
2231 			return true;
2232 
2233 	return false;
2234 }
2235 
2236 /*
2237  * check if an address belongs to a bad page
2238  *
2239  * Note: this check is only for the umc block
2240  */
2241 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2242 				uint64_t addr)
2243 {
2244 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2245 	bool ret = false;
2246 
2247 	if (!con || !con->eh_data)
2248 		return ret;
2249 
2250 	mutex_lock(&con->recovery_lock);
2251 	ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2252 	mutex_unlock(&con->recovery_lock);
2253 	return ret;
2254 }
2255 
2256 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2257 					  uint32_t max_count)
2258 {
2259 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2260 
2261 	/*
2262 	 * Justification of value bad_page_cnt_threshold in ras structure
2263 	 *
2264 	 * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
2265 	 * in eeprom, or amdgpu_bad_page_threshold == -2, which introduces
2266 	 * two scenarios accordingly.
2267 	 *
2268 	 * Bad page retirement enablement:
2269 	 *    - If amdgpu_bad_page_threshold = -2,
2270 	 *      bad_page_cnt_threshold = typical value by formula,
2271 	 *      i.e. vram size / RAS_BAD_PAGE_COVER, capped at max_count.
2272 	 *    - When the value from the user is 0 < amdgpu_bad_page_threshold <
2273 	 *      max record length in eeprom, use it directly.
2274 	 *
2275 	 * Bad page retirement disablement:
2276 	 *    - If amdgpu_bad_page_threshold = 0, bad page retirement
2277 	 *      functionality is disabled, and bad_page_cnt_threshold will
2278 	 *      take no effect.
2279 	 */
2280 
2281 	if (amdgpu_bad_page_threshold < 0) {
2282 		u64 val = adev->gmc.mc_vram_size;
2283 
2284 		do_div(val, RAS_BAD_PAGE_COVER);
2285 		con->bad_page_cnt_threshold = min(lower_32_bits(val),
2286 						  max_count);
2287 	} else {
2288 		con->bad_page_cnt_threshold = min_t(int, max_count,
2289 						    amdgpu_bad_page_threshold);
2290 	}
2291 }
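
/*
 * Worked example for the default (-2) path above: a 16 GiB board gives
 * val = 17179869184 / RAS_BAD_PAGE_COVER = 163, i.e. roughly one bad page
 * per 100 MiB of VRAM is tolerated before the threshold is considered
 * exceeded, unless the eeprom record capacity (max_count) is smaller.
 */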
2292 
2293 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
2294 {
2295 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2296 	struct ras_err_handler_data **data;
2297 	u32  max_eeprom_records_count = 0;
2298 	bool exc_err_limit = false;
2299 	int ret;
2300 
2301 	if (!con || amdgpu_sriov_vf(adev))
2302 		return 0;
2303 
2304 	/* Allow access to RAS EEPROM via debugfs, when the ASIC
2305 	 * supports RAS and debugfs is enabled, but when
2306 	 * adev->ras_enabled is unset, i.e. when "ras_enable"
2307 	 * module parameter is set to 0.
2308 	 */
2309 	con->adev = adev;
2310 
2311 	if (!adev->ras_enabled)
2312 		return 0;
2313 
2314 	data = &con->eh_data;
2315 	*data = kzalloc(sizeof(**data), GFP_KERNEL);
2316 	if (!*data) {
2317 		ret = -ENOMEM;
2318 		goto out;
2319 	}
2320 
2321 	mutex_init(&con->recovery_lock);
2322 	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
2323 	atomic_set(&con->in_recovery, 0);
2324 	con->eeprom_control.bad_channel_bitmap = 0;
2325 
2326 	max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
2327 	amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
2328 
2329 	/* Todo: during testing the SMU might fail to read the eeprom through
2330 	 * I2C when the GPU is pending on an XGMI reset during probe time
2331 	 * (mostly after a second bus reset), so skip it for now
2332 	 */
2333 	if (adev->gmc.xgmi.pending_reset)
2334 		return 0;
2335 	ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
2336 	/*
2337 	 * This call fails when exc_err_limit is true or
2338 	 * ret != 0.
2339 	 */
2340 	if (exc_err_limit || ret)
2341 		goto free;
2342 
2343 	if (con->eeprom_control.ras_num_recs) {
2344 		ret = amdgpu_ras_load_bad_pages(adev);
2345 		if (ret)
2346 			goto free;
2347 
2348 		amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
2349 
2350 		if (con->update_channel_flag) {
2351 			amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
2352 			con->update_channel_flag = false;
2353 		}
2354 	}
2355 
2356 #ifdef CONFIG_X86_MCE_AMD
2357 	if ((adev->asic_type == CHIP_ALDEBARAN) &&
2358 	    (adev->gmc.xgmi.connected_to_cpu))
2359 		amdgpu_register_bad_pages_mca_notifier(adev);
2360 #endif
2361 	return 0;
2362 
2363 free:
2364 	kfree((*data)->bps);
2365 	kfree(*data);
2366 	con->eh_data = NULL;
2367 out:
2368 	dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
2369 
2370 	/*
2371 	 * Except error threshold exceeding case, other failure cases in this
2372 	 * function would not fail amdgpu driver init.
2373 	 */
2374 	if (!exc_err_limit)
2375 		ret = 0;
2376 	else
2377 		ret = -EINVAL;
2378 
2379 	return ret;
2380 }
2381 
2382 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2383 {
2384 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2385 	struct ras_err_handler_data *data = con->eh_data;
2386 
2387 	/* recovery_init failed to init it, fini is useless */
2388 	if (!data)
2389 		return 0;
2390 
2391 	cancel_work_sync(&con->recovery_work);
2392 
2393 	mutex_lock(&con->recovery_lock);
2394 	con->eh_data = NULL;
2395 	kfree(data->bps);
2396 	kfree(data);
2397 	mutex_unlock(&con->recovery_lock);
2398 
2399 	return 0;
2400 }
2401 /* recovery end */
2402 
2403 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
2404 {
2405 	if (amdgpu_sriov_vf(adev)) {
2406 		switch (adev->ip_versions[MP0_HWIP][0]) {
2407 		case IP_VERSION(13, 0, 2):
2408 		case IP_VERSION(13, 0, 6):
2409 			return true;
2410 		default:
2411 			return false;
2412 		}
2413 	}
2414 
2415 	if (adev->asic_type == CHIP_IP_DISCOVERY) {
2416 		switch (adev->ip_versions[MP0_HWIP][0]) {
2417 		case IP_VERSION(13, 0, 0):
2418 		case IP_VERSION(13, 0, 6):
2419 		case IP_VERSION(13, 0, 10):
2420 			return true;
2421 		default:
2422 			return false;
2423 		}
2424 	}
2425 
2426 	return adev->asic_type == CHIP_VEGA10 ||
2427 		adev->asic_type == CHIP_VEGA20 ||
2428 		adev->asic_type == CHIP_ARCTURUS ||
2429 		adev->asic_type == CHIP_ALDEBARAN ||
2430 		adev->asic_type == CHIP_SIENNA_CICHLID;
2431 }
2432 
2433 /*
2434  * this is a workaround for the vega20 workstation sku:
2435  * force-enable gfx ras and ignore the vbios gfx ras flag,
2436  * because GC EDC cannot be written
2437  */
2438 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
2439 {
2440 	struct atom_context *ctx = adev->mode_info.atom_context;
2441 
2442 	if (!ctx)
2443 		return;
2444 
2445 	if (strnstr(ctx->vbios_pn, "D16406",
2446 		    sizeof(ctx->vbios_pn)) ||
2447 		strnstr(ctx->vbios_pn, "D36002",
2448 			sizeof(ctx->vbios_pn)))
2449 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
2450 }
2451 
2452 /*
2453  * check the hardware's ras ability, which will be saved in hw_supported.
2454  * if the hardware does not support ras, we can skip some ras initialization
2455  * and forbid some ras operations from IPs.
2456  * if the software itself, say a boot parameter, limits the ras ability, we
2457  * still need to allow IPs to do some limited operations, like disable. In
2458  * such a case, we have to initialize ras as normal, but need to check whether
2459  * the operation is allowed or not in each function.
2460  */
2461 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
2462 {
2463 	adev->ras_hw_enabled = adev->ras_enabled = 0;
2464 
2465 	if (!amdgpu_ras_asic_supported(adev))
2466 		return;
2467 
2468 	if (!adev->gmc.xgmi.connected_to_cpu &&	!adev->gmc.is_app_apu) {
2469 		if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2470 			dev_info(adev->dev, "MEM ECC is active.\n");
2471 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
2472 						   1 << AMDGPU_RAS_BLOCK__DF);
2473 		} else {
2474 			dev_info(adev->dev, "MEM ECC is not presented.\n");
2475 		}
2476 
2477 		if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2478 			dev_info(adev->dev, "SRAM ECC is active.\n");
2479 			if (!amdgpu_sriov_vf(adev))
2480 				adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
2481 							    1 << AMDGPU_RAS_BLOCK__DF);
2482 			else
2483 				adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
2484 								1 << AMDGPU_RAS_BLOCK__SDMA |
2485 								1 << AMDGPU_RAS_BLOCK__GFX);
2486 
2487 			/* VCN/JPEG RAS can be supported on both bare metal and
2488 			 * SRIOV environment
2489 			 */
2490 			if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
2491 			    adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
2492 				adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
2493 							1 << AMDGPU_RAS_BLOCK__JPEG);
2494 			else
2495 				adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
2496 							1 << AMDGPU_RAS_BLOCK__JPEG);
2497 
2498 			/*
2499 			 * XGMI RAS is not supported if xgmi num physical nodes
2500 			 * is zero
2501 			 */
2502 			if (!adev->gmc.xgmi.num_physical_nodes)
2503 				adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
2504 		} else {
2505 			dev_info(adev->dev, "SRAM ECC is not presented.\n");
2506 		}
2507 	} else {
2508 		/* the driver only manages the RAS features of a few IP blocks
2509 		 * when the GPU is connected to the CPU through XGMI */
2510 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
2511 					   1 << AMDGPU_RAS_BLOCK__SDMA |
2512 					   1 << AMDGPU_RAS_BLOCK__MMHUB);
2513 	}
2514 
2515 	amdgpu_ras_get_quirks(adev);
2516 
2517 	/* hw_supported needs to be aligned with RAS block mask. */
2518 	adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
2519 
2521 	/*
2522 	 * Disable ras feature for aqua vanjaram
2523 	 * by default on apu platform.
2524 	 */
2525 	if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6) &&
2526 	    adev->gmc.is_app_apu)
2527 		adev->ras_enabled = amdgpu_ras_enable != 1 ? 0 :
2528 			adev->ras_hw_enabled & amdgpu_ras_mask;
2529 	else
2530 		adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
2531 			adev->ras_hw_enabled & amdgpu_ras_mask;
2532 }
2533 
2534 static void amdgpu_ras_counte_dw(struct work_struct *work)
2535 {
2536 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2537 					      ras_counte_delay_work.work);
2538 	struct amdgpu_device *adev = con->adev;
2539 	struct drm_device *dev = adev_to_drm(adev);
2540 	unsigned long ce_count, ue_count;
2541 	int res;
2542 
2543 	res = pm_runtime_get_sync(dev->dev);
2544 	if (res < 0)
2545 		goto Out;
2546 
2547 	/* Cache new values.
2548 	 */
2549 	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
2550 		atomic_set(&con->ras_ce_count, ce_count);
2551 		atomic_set(&con->ras_ue_count, ue_count);
2552 	}
2553 
2554 	pm_runtime_mark_last_busy(dev->dev);
2555 Out:
2556 	pm_runtime_put_autosuspend(dev->dev);
2557 }
2558 
2559 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
2560 {
2561 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2562 	bool df_poison, umc_poison;
2563 
2564 	/* poison setting is useless on SRIOV guest */
2565 	if (amdgpu_sriov_vf(adev) || !con)
2566 		return;
2567 
2568 	/* Init poison supported flag, the default value is false */
2569 	if (adev->gmc.xgmi.connected_to_cpu) {
2570 		/* enabled by default when GPU is connected to CPU */
2571 		con->poison_supported = true;
2572 	} else if (adev->df.funcs &&
2573 	    adev->df.funcs->query_ras_poison_mode &&
2574 	    adev->umc.ras &&
2575 	    adev->umc.ras->query_ras_poison_mode) {
2576 		df_poison =
2577 			adev->df.funcs->query_ras_poison_mode(adev);
2578 		umc_poison =
2579 			adev->umc.ras->query_ras_poison_mode(adev);
2580 
2581 		/* Only when poison is set in both DF and UMC can we support it */
2582 		if (df_poison && umc_poison)
2583 			con->poison_supported = true;
2584 		else if (df_poison != umc_poison)
2585 			dev_warn(adev->dev,
2586 				"Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
2587 				df_poison, umc_poison);
2588 	}
2589 }
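
/*
 * Summary of the policy above: poison mode is reported as supported when
 * the GPU is connected to the CPU (connected_to_cpu) or when both the DF
 * and UMC blocks report poison mode enabled; a DF/UMC mismatch only logs
 * the warning and leaves poison_supported false.
 */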
2590 
2591 int amdgpu_ras_init(struct amdgpu_device *adev)
2592 {
2593 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2594 	int r;
2595 
2596 	if (con)
2597 		return 0;
2598 
2599 	con = kzalloc(sizeof(struct amdgpu_ras) +
2600 			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
2601 			sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
2602 			GFP_KERNEL);
2603 	if (!con)
2604 		return -ENOMEM;
2605 
2606 	con->adev = adev;
2607 	INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
2608 	atomic_set(&con->ras_ce_count, 0);
2609 	atomic_set(&con->ras_ue_count, 0);
2610 
2611 	con->objs = (struct ras_manager *)(con + 1);
2612 
2613 	amdgpu_ras_set_context(adev, con);
2614 
2615 	amdgpu_ras_check_supported(adev);
2616 
2617 	if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
2618 		/* set the gfx block ras context feature for VEGA20 Gaming,
2619 		 * so that a ras disable cmd is sent to the ras ta during ras late init.
2620 		 */
2621 		if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
2622 			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
2623 
2624 			return 0;
2625 		}
2626 
2627 		r = 0;
2628 		goto release_con;
2629 	}
2630 
2631 	con->update_channel_flag = false;
2632 	con->features = 0;
2633 	INIT_LIST_HEAD(&con->head);
2634 	/* Might need to get this flag from vbios. */
2635 	con->flags = RAS_DEFAULT_FLAGS;
2636 
2637 	/* initialize the nbio ras function ahead of any other
2638 	 * ras functions so the hardware fatal error interrupt
2639 	 * can be enabled as early as possible */
2640 	switch (adev->ip_versions[NBIO_HWIP][0]) {
2641 	case IP_VERSION(7, 4, 0):
2642 	case IP_VERSION(7, 4, 1):
2643 	case IP_VERSION(7, 4, 4):
2644 		if (!adev->gmc.xgmi.connected_to_cpu)
2645 			adev->nbio.ras = &nbio_v7_4_ras;
2646 		break;
2647 	case IP_VERSION(4, 3, 0):
2648 		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
2649 			/* unlike other generations of nbio ras,
2650 			 * nbio v4_3 only supports a fatal error interrupt
2651 			 * to inform software that DF is frozen due to
2652 			 * a system fatal error event. the driver should not
2653 			 * enable nbio ras in such a case. Instead,
2654 			 * check DF RAS */
2655 			adev->nbio.ras = &nbio_v4_3_ras;
2656 		break;
2657 	case IP_VERSION(7, 9, 0):
2658 		if (!adev->gmc.is_app_apu)
2659 			adev->nbio.ras = &nbio_v7_9_ras;
2660 		break;
2661 	default:
2662 		/* nbio ras is not available */
2663 		break;
2664 	}
2665 
2666 	/* the nbio ras block needs to be enabled ahead of other ras blocks
2667 	 * to handle fatal errors */
2668 	r = amdgpu_nbio_ras_sw_init(adev);
2669 	if (r)
2670 		goto release_con;
2671 
2672 	if (adev->nbio.ras &&
2673 	    adev->nbio.ras->init_ras_controller_interrupt) {
2674 		r = adev->nbio.ras->init_ras_controller_interrupt(adev);
2675 		if (r)
2676 			goto release_con;
2677 	}
2678 
2679 	if (adev->nbio.ras &&
2680 	    adev->nbio.ras->init_ras_err_event_athub_interrupt) {
2681 		r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
2682 		if (r)
2683 			goto release_con;
2684 	}
2685 
2686 	amdgpu_ras_query_poison_mode(adev);
2687 
2688 	if (amdgpu_ras_fs_init(adev)) {
2689 		r = -EINVAL;
2690 		goto release_con;
2691 	}
2692 
2693 	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
2694 		 "hardware ability[%x] ras_mask[%x]\n",
2695 		 adev->ras_hw_enabled, adev->ras_enabled);
2696 
2697 	return 0;
2698 release_con:
2699 	amdgpu_ras_set_context(adev, NULL);
2700 	kfree(con);
2701 
2702 	return r;
2703 }
2704 
2705 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
2706 {
2707 	if (adev->gmc.xgmi.connected_to_cpu ||
2708 	    adev->gmc.is_app_apu)
2709 		return 1;
2710 	return 0;
2711 }
2712 
2713 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
2714 					struct ras_common_if *ras_block)
2715 {
2716 	struct ras_query_if info = {
2717 		.head = *ras_block,
2718 	};
2719 
2720 	if (!amdgpu_persistent_edc_harvesting_supported(adev))
2721 		return 0;
2722 
2723 	if (amdgpu_ras_query_error_status(adev, &info) != 0)
2724 		DRM_WARN("RAS init harvest failure");
2725 
2726 	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
2727 		DRM_WARN("RAS init harvest reset failure");
2728 
2729 	return 0;
2730 }
2731 
2732 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
2733 {
2734 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2735 
2736 	if (!con)
2737 		return false;
2738 
2739 	return con->poison_supported;
2740 }
2741 
2742 /* helper function to handle common stuff in ip late init phase */
2743 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
2744 			 struct ras_common_if *ras_block)
2745 {
2746 	struct amdgpu_ras_block_object *ras_obj = NULL;
2747 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2748 	struct ras_query_if *query_info;
2749 	unsigned long ue_count, ce_count;
2750 	int r;
2751 
2752 	/* disable RAS feature per IP block if it is not supported */
2753 	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
2754 		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
2755 		return 0;
2756 	}
2757 
2758 	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
2759 	if (r) {
2760 		if (adev->in_suspend || amdgpu_in_reset(adev)) {
2761 			/* in the resume phase, if we fail to enable ras,
2762 			 * clean up all ras fs nodes and disable ras */
2763 			goto cleanup;
2764 		} else
2765 			return r;
2766 	}
2767 
2768 	/* check for errors on warm reset on ASICs with persistent edc support */
2769 	amdgpu_persistent_edc_harvesting(adev, ras_block);
2770 
2771 	/* in the resume phase, there is no need to create ras fs nodes */
2772 	if (adev->in_suspend || amdgpu_in_reset(adev))
2773 		return 0;
2774 
2775 	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
2776 	if (ras_obj->ras_cb || (ras_obj->hw_ops &&
2777 	    (ras_obj->hw_ops->query_poison_status ||
2778 	    ras_obj->hw_ops->handle_poison_consumption))) {
2779 		r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
2780 		if (r)
2781 			goto cleanup;
2782 	}
2783 
2784 	if (ras_obj->hw_ops &&
2785 	    (ras_obj->hw_ops->query_ras_error_count ||
2786 	     ras_obj->hw_ops->query_ras_error_status)) {
2787 		r = amdgpu_ras_sysfs_create(adev, ras_block);
2788 		if (r)
2789 			goto interrupt;
2790 
2791 		/* Those are the cached values at init.
2792 		 */
2793 		query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
2794 		if (!query_info)
2795 			return -ENOMEM;
2796 		memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
2797 
2798 		if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
2799 			atomic_set(&con->ras_ce_count, ce_count);
2800 			atomic_set(&con->ras_ue_count, ue_count);
2801 		}
2802 
2803 		kfree(query_info);
2804 	}
2805 
2806 	return 0;
2807 
2808 interrupt:
2809 	if (ras_obj->ras_cb)
2810 		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
2811 cleanup:
2812 	amdgpu_ras_feature_enable(adev, ras_block, 0);
2813 	return r;
2814 }
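
/*
 * Usage sketch (hypothetical IP callback, for illustration): a block's
 * .ras_late_init hook usually just forwards its descriptor here,
 *
 *	static int xxx_ras_late_init(struct amdgpu_device *adev,
 *				     struct ras_common_if *ras_block)
 *	{
 *		return amdgpu_ras_block_late_init(adev, ras_block);
 *	}
 *
 * which is also exactly what the _default helper below does for blocks
 * that leave .ras_late_init unset.
 */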
2815 
2816 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
2817 			 struct ras_common_if *ras_block)
2818 {
2819 	return amdgpu_ras_block_late_init(adev, ras_block);
2820 }
2821 
2822 /* helper function to remove ras fs node and interrupt handler */
2823 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
2824 			  struct ras_common_if *ras_block)
2825 {
2826 	struct amdgpu_ras_block_object *ras_obj;
2827 	if (!ras_block)
2828 		return;
2829 
2830 	amdgpu_ras_sysfs_remove(adev, ras_block);
2831 
2832 	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
2833 	if (ras_obj->ras_cb)
2834 		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
2835 }
2836 
2837 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
2838 			  struct ras_common_if *ras_block)
2839 {
2840 	return amdgpu_ras_block_late_fini(adev, ras_block);
2841 }
2842 
2843 /* do some init work after IP late init as a dependency,
2844  * and it runs in the resume/gpu reset/boot-up cases.
2845  */
2846 void amdgpu_ras_resume(struct amdgpu_device *adev)
2847 {
2848 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2849 	struct ras_manager *obj, *tmp;
2850 
2851 	if (!adev->ras_enabled || !con) {
2852 		/* clean ras context for VEGA20 Gaming after send ras disable cmd */
2853 		amdgpu_release_ras_context(adev);
2854 
2855 		return;
2856 	}
2857 
2858 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
2859 		/* Set up all other IPs which are not implemented. There is a
2860 		 * tricky thing: the IP's actual ras error type should be
2861 		 * MULTI_UNCORRECTABLE, but since the driver does not handle it,
2862 		 * ERROR_NONE makes sense anyway.
2863 		 */
2864 		amdgpu_ras_enable_all_features(adev, 1);
2865 
2866 		/* We enable ras on all hw_supported blocks, but the boot
2867 		 * parameter might disable some of them, and one or more IPs may
2868 		 * not be implemented yet. So we disable them on their behalf.
2869 		 */
2870 		list_for_each_entry_safe(obj, tmp, &con->head, node) {
2871 			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
2872 				amdgpu_ras_feature_enable(adev, &obj->head, 0);
2873 				/* there should not be any reference. */
2874 				WARN_ON(alive_obj(obj));
2875 			}
2876 		}
2877 	}
2878 }
2879 
2880 void amdgpu_ras_suspend(struct amdgpu_device *adev)
2881 {
2882 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2883 
2884 	if (!adev->ras_enabled || !con)
2885 		return;
2886 
2887 	amdgpu_ras_disable_all_features(adev, 0);
2888 	/* Make sure all ras objects are disabled. */
2889 	if (con->features)
2890 		amdgpu_ras_disable_all_features(adev, 1);
2891 }
2892 
2893 int amdgpu_ras_late_init(struct amdgpu_device *adev)
2894 {
2895 	struct amdgpu_ras_block_list *node, *tmp;
2896 	struct amdgpu_ras_block_object *obj;
2897 	int r;
2898 
2899 	/* The guest side doesn't need to init the ras feature */
2900 	if (amdgpu_sriov_vf(adev))
2901 		return 0;
2902 
2903 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
2904 		if (!node->ras_obj) {
2905 			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
2906 			continue;
2907 		}
2908 
2909 		obj = node->ras_obj;
2910 		if (obj->ras_late_init) {
2911 			r = obj->ras_late_init(adev, &obj->ras_comm);
2912 			if (r) {
2913 				dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
2914 					obj->ras_comm.name, r);
2915 				return r;
2916 			}
2917 		} else
2918 			amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
2919 	}
2920 
2921 	return 0;
2922 }
2923 
2924 /* do some fini work before IP fini as a dependency */
2925 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
2926 {
2927 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2928 
2929 	if (!adev->ras_enabled || !con)
2930 		return 0;
2931 
2933 	/* Need disable ras on all IPs here before ip [hw/sw]fini */
2934 	if (con->features)
2935 		amdgpu_ras_disable_all_features(adev, 0);
2936 	amdgpu_ras_recovery_fini(adev);
2937 	return 0;
2938 }
2939 
2940 int amdgpu_ras_fini(struct amdgpu_device *adev)
2941 {
2942 	struct amdgpu_ras_block_list *ras_node, *tmp;
2943 	struct amdgpu_ras_block_object *obj = NULL;
2944 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2945 
2946 	if (!adev->ras_enabled || !con)
2947 		return 0;
2948 
2949 	list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
2950 		if (ras_node->ras_obj) {
2951 			obj = ras_node->ras_obj;
2952 			if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
2953 			    obj->ras_fini)
2954 				obj->ras_fini(adev, &obj->ras_comm);
2955 			else
2956 				amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
2957 		}
2958 
2959 		/* Clear ras blocks from ras_list and free ras block list node */
2960 		list_del(&ras_node->node);
2961 		kfree(ras_node);
2962 	}
2963 
2964 	amdgpu_ras_fs_fini(adev);
2965 	amdgpu_ras_interrupt_remove_all(adev);
2966 
2967 	WARN(con->features, "Feature mask is not cleared");
2968 
2969 	if (con->features)
2970 		amdgpu_ras_disable_all_features(adev, 1);
2971 
2972 	cancel_delayed_work_sync(&con->ras_counte_delay_work);
2973 
2974 	amdgpu_ras_set_context(adev, NULL);
2975 	kfree(con);
2976 
2977 	return 0;
2978 }
2979 
2980 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
2981 {
2982 	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
2983 		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2984 
2985 		dev_info(adev->dev, "uncorrectable hardware error "
2986 			"(ERREVENT_ATHUB_INTERRUPT) detected!\n");
2987 
2988 		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2989 		amdgpu_ras_reset_gpu(adev);
2990 	}
2991 }
2992 
2993 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
2994 {
2995 	if (adev->asic_type == CHIP_VEGA20 &&
2996 	    adev->pm.fw_version <= 0x283400) {
2997 		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
2998 				amdgpu_ras_intr_triggered();
2999 	}
3000 
3001 	return false;
3002 }
3003 
3004 void amdgpu_release_ras_context(struct amdgpu_device *adev)
3005 {
3006 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3007 
3008 	if (!con)
3009 		return;
3010 
3011 	if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
3012 		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
3013 		amdgpu_ras_set_context(adev, NULL);
3014 		kfree(con);
3015 	}
3016 }
3017 
3018 #ifdef CONFIG_X86_MCE_AMD
3019 static struct amdgpu_device *find_adev(uint32_t node_id)
3020 {
3021 	int i;
3022 	struct amdgpu_device *adev = NULL;
3023 
3024 	for (i = 0; i < mce_adev_list.num_gpu; i++) {
3025 		adev = mce_adev_list.devs[i];
3026 
3027 		if (adev && adev->gmc.xgmi.connected_to_cpu &&
3028 		    adev->gmc.xgmi.physical_node_id == node_id)
3029 			break;
3030 		adev = NULL;
3031 	}
3032 
3033 	return adev;
3034 }
3035 
3036 #define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
3037 #define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
3038 #define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
3039 #define GPU_ID_OFFSET		8
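
/*
 * Decode sketch for the helpers above (field value hypothetical): if bits
 * [47:44] of m->ipid hold 0x9, GET_MCA_IPID_GPUID(m->ipid) returns 0x9 and
 * the notifier computes gpu_id = 0x9 - GPU_ID_OFFSET = 1, which is then
 * matched against adev->gmc.xgmi.physical_node_id in find_adev().
 */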
3040 
3041 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
3042 				    unsigned long val, void *data)
3043 {
3044 	struct mce *m = (struct mce *)data;
3045 	struct amdgpu_device *adev = NULL;
3046 	uint32_t gpu_id = 0;
3047 	uint32_t umc_inst = 0, ch_inst = 0;
3048 
3049 	/*
3050 	 * If the error was generated in UMC_V2, which belongs to GPU UMCs,
3051 	 * and error occurred in DramECC (Extended error code = 0) then only
3052 	 * process the error, else bail out.
3053 	 */
3054 	if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
3055 		    (XEC(m->status, 0x3f) == 0x0)))
3056 		return NOTIFY_DONE;
3057 
3058 	/*
3059 	 * If it is correctable error, return.
3060 	 */
3061 	if (mce_is_correctable(m))
3062 		return NOTIFY_OK;
3063 
3064 	/*
3065 	 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
3066 	 */
3067 	gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
3068 
3069 	adev = find_adev(gpu_id);
3070 	if (!adev) {
3071 		DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
3072 								gpu_id);
3073 		return NOTIFY_DONE;
3074 	}
3075 
3076 	/*
3077 	 * If it is uncorrectable error, then find out UMC instance and
3078 	 * channel index.
3079 	 */
3080 	umc_inst = GET_UMC_INST(m->ipid);
3081 	ch_inst = GET_CHAN_INDEX(m->ipid);
3082 
3083 	dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d\n",
3084 			     umc_inst, ch_inst);
3085 
3086 	if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
3087 		return NOTIFY_OK;
3088 	else
3089 		return NOTIFY_DONE;
3090 }
3091 
3092 static struct notifier_block amdgpu_bad_page_nb = {
3093 	.notifier_call  = amdgpu_bad_page_notifier,
3094 	.priority       = MCE_PRIO_UC,
3095 };
3096 
3097 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
3098 {
3099 	/*
3100 	 * Add the adev to the mce_adev_list.
3101 	 * During mode2 reset, amdgpu device is temporarily
3102 	 * removed from the mgpu_info list which can cause
3103 	 * page retirement to fail.
3104 	 * Use this list instead of mgpu_info to find the amdgpu
3105 	 * device on which the UMC error was reported.
3106 	 */
3107 	mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
3108 
3109 	/*
3110 	 * Register the x86 notifier only once
3111 	 * with MCE subsystem.
3112 	 */
3113 	if (!notifier_registered) {
3114 		mce_register_decode_chain(&amdgpu_bad_page_nb);
3115 		notifier_registered = true;
3116 	}
3117 }
3118 #endif
3119 
3120 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
3121 {
3122 	if (!adev)
3123 		return NULL;
3124 
3125 	return adev->psp.ras_context.ras;
3126 }
3127 
3128 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
3129 {
3130 	if (!adev)
3131 		return -EINVAL;
3132 
3133 	adev->psp.ras_context.ras = ras_con;
3134 	return 0;
3135 }
3136 
3137 /* check if ras is supported on block, say, sdma, gfx */
3138 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
3139 		unsigned int block)
3140 {
3141 	int ret = 0;
3142 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3143 
3144 	if (block >= AMDGPU_RAS_BLOCK_COUNT)
3145 		return 0;
3146 
3147 	ret = ras && (adev->ras_enabled & (1 << block));
3148 
3149 	/* For the special asic with mem ecc enabled but sram ecc
3150 	 * not enabled, even if the ras block is not supported in
3151 	 * .ras_enabled, if the asic supports poison mode and the
3152 	 * ras block has a ras configuration, it can be considered
3153 	 * that the ras block supports the ras function.
3154 	 */
3155 	if (!ret &&
3156 	    (block == AMDGPU_RAS_BLOCK__GFX ||
3157 	     block == AMDGPU_RAS_BLOCK__SDMA ||
3158 	     block == AMDGPU_RAS_BLOCK__VCN ||
3159 	     block == AMDGPU_RAS_BLOCK__JPEG) &&
3160 	    amdgpu_ras_is_poison_mode_supported(adev) &&
3161 	    amdgpu_ras_get_ras_block(adev, block, 0))
3162 		ret = 1;
3163 
3164 	return ret;
3165 }
3166 
3167 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
3168 {
3169 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3170 
3171 	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
3172 		amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
3173 	return 0;
3174 }
3175 
3176 
3177 /* Register each ip ras block into amdgpu ras */
3178 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
3179 		struct amdgpu_ras_block_object *ras_block_obj)
3180 {
3181 	struct amdgpu_ras_block_list *ras_node;
3182 	if (!adev || !ras_block_obj)
3183 		return -EINVAL;
3184 
3185 	ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
3186 	if (!ras_node)
3187 		return -ENOMEM;
3188 
3189 	INIT_LIST_HEAD(&ras_node->node);
3190 	ras_node->ras_obj = ras_block_obj;
3191 	list_add_tail(&ras_node->node, &adev->ras_list);
3192 
3193 	return 0;
3194 }
3195 
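/*
 * Note: callers are assumed to pass a buffer of at least 16 bytes for
 * err_type_name (matching the err_type_name[16] scratch array used later
 * in this file); "uncorrectable" plus the NUL terminator needs 14 bytes.
 */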
3196 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
3197 {
3198 	if (!err_type_name)
3199 		return;
3200 
3201 	switch (err_type) {
3202 	case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
3203 		sprintf(err_type_name, "correctable");
3204 		break;
3205 	case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
3206 		sprintf(err_type_name, "uncorrectable");
3207 		break;
3208 	default:
3209 		sprintf(err_type_name, "unknown");
3210 		break;
3211 	}
3212 }
3213 
3214 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
3215 					 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
3216 					 uint32_t instance,
3217 					 uint32_t *memory_id)
3218 {
3219 	uint32_t err_status_lo_data, err_status_lo_offset;
3220 
3221 	if (!reg_entry)
3222 		return false;
3223 
3224 	err_status_lo_offset =
3225 		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
3226 					    reg_entry->seg_lo, reg_entry->reg_lo);
3227 	err_status_lo_data = RREG32(err_status_lo_offset);
3228 
3229 	if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
3230 	    !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
3231 		return false;
3232 
3233 	*memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
3234 
3235 	return true;
3236 }
3237 
3238 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
3239 				       const struct amdgpu_ras_err_status_reg_entry *reg_entry,
3240 				       uint32_t instance,
3241 				       unsigned long *err_cnt)
3242 {
3243 	uint32_t err_status_hi_data, err_status_hi_offset;
3244 
3245 	if (!reg_entry)
3246 		return false;
3247 
3248 	err_status_hi_offset =
3249 		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
3250 					    reg_entry->seg_hi, reg_entry->reg_hi);
3251 	err_status_hi_data = RREG32(err_status_hi_offset);
3252 
3253 	if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
3254 	    !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
3255 		/* keep the check here in case we need to refer to the result later */
3256 		dev_dbg(adev->dev, "Invalid err_info field\n");
3257 
3258 	/* read err count */
3259 	*err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
3260 
3261 	return true;
3262 }
3263 
3264 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
3265 					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
3266 					   uint32_t reg_list_size,
3267 					   const struct amdgpu_ras_memory_id_entry *mem_list,
3268 					   uint32_t mem_list_size,
3269 					   uint32_t instance,
3270 					   uint32_t err_type,
3271 					   unsigned long *err_count)
3272 {
3273 	uint32_t memory_id;
3274 	unsigned long err_cnt;
3275 	char err_type_name[16];
3276 	uint32_t i, j;
3277 
3278 	for (i = 0; i < reg_list_size; i++) {
3279 		/* query memory_id from err_status_lo */
3280 		if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
3281 							 instance, &memory_id))
3282 			continue;
3283 
3284 		/* query err_cnt from err_status_hi */
3285 		if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
3286 						       instance, &err_cnt) ||
3287 		    !err_cnt)
3288 			continue;
3289 
3290 		*err_count += err_cnt;
3291 
3292 		/* log the errors */
3293 		amdgpu_ras_get_error_type_name(err_type, err_type_name);
3294 		if (!mem_list) {
3295 			/* memory_list is not supported */
3296 			dev_info(adev->dev,
3297 				 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
3298 				 err_cnt, err_type_name,
3299 				 reg_list[i].block_name,
3300 				 instance, memory_id);
3301 		} else {
3302 			for (j = 0; j < mem_list_size; j++) {
3303 				if (memory_id == mem_list[j].memory_id) {
3304 					dev_info(adev->dev,
3305 						 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
3306 						 err_cnt, err_type_name,
3307 						 reg_list[i].block_name,
3308 						 instance, mem_list[j].name);
3309 					break;
3310 				}
3311 			}
3312 		}
3313 	}
3314 }
3315 
3316 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
3317 					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
3318 					   uint32_t reg_list_size,
3319 					   uint32_t instance)
3320 {
3321 	uint32_t err_status_lo_offset, err_status_hi_offset;
3322 	uint32_t i;
3323 
3324 	for (i = 0; i < reg_list_size; i++) {
3325 		err_status_lo_offset =
3326 			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
3327 						    reg_list[i].seg_lo, reg_list[i].reg_lo);
3328 		err_status_hi_offset =
3329 			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
3330 						    reg_list[i].seg_hi, reg_list[i].reg_hi);
3331 		WREG32(err_status_lo_offset, 0);
3332 		WREG32(err_status_hi_offset, 0);
3333 	}
3334 }
3335