      1 /*	$NetBSD: amdgpu_ras.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright 2018 Advanced Micro Devices, Inc.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     22  * OTHER DEALINGS IN THE SOFTWARE.
     23  *
     24  *
     25  */
     26 #include <sys/cdefs.h>
     27 __KERNEL_RCSID(0, "$NetBSD: amdgpu_ras.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $");
     28 
     29 #include <linux/debugfs.h>
     30 #include <linux/list.h>
     31 #include <linux/module.h>
     32 #include <linux/uaccess.h>
     33 #include <linux/reboot.h>
     34 #include <linux/syscalls.h>
     35 
     36 #include "amdgpu.h"
     37 #include "amdgpu_ras.h"
     38 #include "amdgpu_atomfirmware.h"
     39 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
     40 
     41 #include <linux/nbsd-namespace.h>
     42 
     43 const char *ras_error_string[] = {
     44 	"none",
     45 	"parity",
     46 	"single_correctable",
     47 	"multi_uncorrectable",
     48 	"poison",
     49 };
     50 
     51 const char *ras_block_string[] = {
     52 	"umc",
     53 	"sdma",
     54 	"gfx",
     55 	"mmhub",
     56 	"athub",
     57 	"pcie_bif",
     58 	"hdp",
     59 	"xgmi_wafl",
     60 	"df",
     61 	"smn",
     62 	"sem",
     63 	"mp0",
     64 	"mp1",
     65 	"fuse",
     66 };
     67 
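/*
 * Note: ras_err_str() relies on ffs() being 1-based.  The error types
 * (as defined in amdgpu_ras.h) are bit flags, so bit 0 (parity) maps to
 * index 1, while ffs(0) == 0 picks "none".
 */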
     68 #define ras_err_str(i) (ras_error_string[ffs(i)])
     69 #define ras_block_str(i) (ras_block_string[i])
     70 
     71 #define AMDGPU_RAS_FLAG_INIT_BY_VBIOS		1
     72 #define AMDGPU_RAS_FLAG_INIT_NEED_RESET		2
     73 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
     74 
     75 /* inject address is 52 bits */
     76 #define	RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)
     77 
     78 enum amdgpu_ras_retire_page_reservation {
     79 	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
     80 	AMDGPU_RAS_RETIRE_PAGE_PENDING,
     81 	AMDGPU_RAS_RETIRE_PAGE_FAULT,
     82 };
     83 
     84 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
     85 
     86 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
     87 				uint64_t addr) __unused;
     88 
     89 #ifndef __NetBSD__		/* XXX debugfs */
     90 
     91 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
     92 					size_t size, loff_t *pos)
     93 {
     94 	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
     95 	struct ras_query_if info = {
     96 		.head = obj->head,
     97 	};
     98 	ssize_t s;
     99 	char val[128];
    100 
    101 	if (amdgpu_ras_error_query(obj->adev, &info))
    102 		return -EINVAL;
    103 
    104 	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
    105 			"ue", info.ue_count,
    106 			"ce", info.ce_count);
    107 	if (*pos >= s)
    108 		return 0;
    109 
    110 	s -= *pos;
    111 	s = min_t(u64, s, size);
     112 
    114 	if (copy_to_user(buf, &val[*pos], s))
    115 		return -EINVAL;
    116 
    117 	*pos += s;
    118 
    119 	return s;
    120 }
    121 
    122 static const struct file_operations amdgpu_ras_debugfs_ops = {
    123 	.owner = THIS_MODULE,
    124 	.read = amdgpu_ras_debugfs_read,
    125 	.write = NULL,
    126 	.llseek = default_llseek
    127 };
    128 
    129 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
    130 {
    131 	int i;
    132 
    133 	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
    134 		*block_id = i;
    135 		if (strcmp(name, ras_block_str(i)) == 0)
    136 			return 0;
    137 	}
    138 	return -EINVAL;
    139 }
    140 
    141 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
    142 		const char __user *buf, size_t size,
    143 		loff_t *pos, struct ras_debug_if *data)
    144 {
    145 	ssize_t s = min_t(u64, 64, size);
    146 	char str[65];
    147 	char block_name[33];
    148 	char err[9] = "ue";
    149 	int op = -1;
    150 	int block_id;
    151 	uint32_t sub_block;
    152 	u64 address, value;
    153 
    154 	if (*pos)
    155 		return -EINVAL;
    156 	*pos = size;
    157 
    158 	memset(str, 0, sizeof(str));
    159 	memset(data, 0, sizeof(*data));
    160 
    161 	if (copy_from_user(str, buf, s))
    162 		return -EINVAL;
    163 
    164 	if (sscanf(str, "disable %32s", block_name) == 1)
    165 		op = 0;
    166 	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
    167 		op = 1;
    168 	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
    169 		op = 2;
    170 	else if (str[0] && str[1] && str[2] && str[3])
    171 		/* ascii string, but commands are not matched. */
    172 		return -EINVAL;
    173 
    174 	if (op != -1) {
    175 		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
    176 			return -EINVAL;
    177 
    178 		data->head.block = block_id;
    179 		/* only ue and ce errors are supported */
    180 		if (!memcmp("ue", err, 2))
    181 			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
    182 		else if (!memcmp("ce", err, 2))
    183 			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
    184 		else
    185 			return -EINVAL;
    186 
    187 		data->op = op;
    188 
    189 		if (op == 2) {
    190 			if (sscanf(str, "%*s %*s %*s %u %llu %llu",
    191 						&sub_block, &address, &value) != 3)
    192 				if (sscanf(str, "%*s %*s %*s 0x%x 0x%"PRIx64" 0x%"PRIx64"",
    193 							&sub_block, &address, &value) != 3)
    194 					return -EINVAL;
    195 			data->head.sub_block_index = sub_block;
    196 			data->inject.address = address;
    197 			data->inject.value = value;
    198 		}
    199 	} else {
    200 		if (size < sizeof(*data))
    201 			return -EINVAL;
    202 
    203 		if (copy_from_user(data, buf, sizeof(*data)))
    204 			return -EINVAL;
    205 	}
    206 
    207 	return 0;
    208 }
    209 
    210 /**
    211  * DOC: AMDGPU RAS debugfs control interface
    212  *
     213  * It accepts struct ras_debug_if, which has two members.
    214  *
    215  * First member: ras_debug_if::head or ras_debug_if::inject.
    216  *
    217  * head is used to indicate which IP block will be under control.
    218  *
     219  * head has four members: block, type, sub_block_index, and name.
     220  * block: which IP will be under control.
     221  * type: what kind of error will be enabled/disabled/injected.
     222  * sub_block_index: some IPs have subcomponents, e.g., GFX, SDMA.
     223  * name: the name of the IP.
    224  *
     225  * inject has two more members than head: address and value.
     226  * As their names indicate, an inject operation will write the
     227  * value to the address.
    228  *
    229  * The second member: struct ras_debug_if::op.
    230  * It has three kinds of operations.
    231  *
    232  * - 0: disable RAS on the block. Take ::head as its data.
    233  * - 1: enable RAS on the block. Take ::head as its data.
    234  * - 2: inject errors on the block. Take ::inject as its data.
    235  *
    236  * How to use the interface?
    237  *
    238  * Programs
    239  *
     240  * Copy the struct ras_debug_if into your code and initialize it.
    241  * Write the struct to the control node.
    242  *
    243  * Shells
    244  *
    245  * .. code-block:: bash
    246  *
    247  *	echo op block [error [sub_block address value]] > .../ras/ras_ctrl
    248  *
    249  * Parameters:
    250  *
    251  * op: disable, enable, inject
    252  *	disable: only block is needed
    253  *	enable: block and error are needed
    254  *	inject: error, address, value are needed
    255  * block: umc, sdma, gfx, .........
    256  *	see ras_block_string[] for details
    257  * error: ue, ce
    258  *	ue: multi_uncorrectable
    259  *	ce: single_correctable
    260  * sub_block:
    261  *	sub block index, pass 0 if there is no sub block
    262  *
    263  * here are some examples for bash commands:
    264  *
    265  * .. code-block:: bash
    266  *
    267  *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
    268  *	echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
    269  *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
    270  *
    271  * How to check the result?
    272  *
    273  * For disable/enable, please check ras features at
    274  * /sys/class/drm/card[0/1/2...]/device/ras/features
    275  *
    276  * For inject, please check corresponding err count at
    277  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
    278  *
    279  * .. note::
    280  *	Operations are only allowed on blocks which are supported.
    281  *	Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
    282  *	to see which blocks support RAS on a particular asic.
    283  *
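 * For programs, here is a minimal sketch of driving the control node
 * directly (the field names follow the description of struct
 * ras_debug_if above; open()/write() error handling is elided):
 *
 * .. code-block:: c
 *
 *	struct ras_debug_if data = { 0 };
 *	int fd;
 *
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *	data.op = 1;			// 1 == enable
 *
 *	fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *	write(fd, &data, sizeof(data));	// must write at least sizeof(data)
 *	close(fd);
 *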
    284  */
    285 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
    286 		size_t size, loff_t *pos)
    287 {
    288 	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
    289 	struct ras_debug_if data;
    290 	int ret = 0;
    291 
    292 	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
    293 	if (ret)
    294 		return -EINVAL;
    295 
    296 	if (!amdgpu_ras_is_supported(adev, data.head.block))
    297 		return -EINVAL;
    298 
    299 	switch (data.op) {
    300 	case 0:
    301 		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
    302 		break;
    303 	case 1:
    304 		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
    305 		break;
    306 	case 2:
    307 		if ((data.inject.address >= adev->gmc.mc_vram_size) ||
    308 		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
    309 			ret = -EINVAL;
    310 			break;
    311 		}
    312 
    313 		/* umc ce/ue error injection for a bad page is not allowed */
    314 		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
    315 		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
    316 			DRM_WARN("RAS WARN: 0x%"PRIx64" has been marked as bad before error injection!\n",
    317 					data.inject.address);
    318 			break;
    319 		}
    320 
     321 		/* data.inject.address is an offset instead of an absolute gpu address */
    322 		ret = amdgpu_ras_error_inject(adev, &data.inject);
    323 		break;
    324 	default:
    325 		ret = -EINVAL;
    326 		break;
    327 	}
    328 
    329 	if (ret)
    330 		return -EINVAL;
    331 
    332 	return size;
    333 }
    334 
    335 /**
    336  * DOC: AMDGPU RAS debugfs EEPROM table reset interface
    337  *
    338  * Some boards contain an EEPROM which is used to persistently store a list of
     339  * bad pages which experienced ECC errors in vram.  This interface provides
    340  * a way to reset the EEPROM, e.g., after testing error injection.
    341  *
    342  * Usage:
    343  *
    344  * .. code-block:: bash
    345  *
    346  *	echo 1 > ../ras/ras_eeprom_reset
    347  *
     348  * will reset the EEPROM table to 0 entries.
    349  *
    350  */
    351 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
    352 		size_t size, loff_t *pos)
    353 {
    354 	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
    355 	int ret;
    356 
    357 	ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control);
    358 
    359 	return ret == 1 ? size : -EIO;
    360 }
    361 
    362 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
    363 	.owner = THIS_MODULE,
    364 	.read = NULL,
    365 	.write = amdgpu_ras_debugfs_ctrl_write,
    366 	.llseek = default_llseek
    367 };
    368 
    369 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
    370 	.owner = THIS_MODULE,
    371 	.read = NULL,
    372 	.write = amdgpu_ras_debugfs_eeprom_write,
    373 	.llseek = default_llseek
    374 };
    375 
    376 /**
    377  * DOC: AMDGPU RAS sysfs Error Count Interface
    378  *
    379  * It allows the user to read the error count for each IP block on the gpu through
    380  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
    381  *
     382  * It outputs multiple lines which report the uncorrected (ue) and corrected
    383  * (ce) error counts.
    384  *
    385  * The format of one line is below,
    386  *
    387  * [ce|ue]: count
    388  *
    389  * Example:
    390  *
    391  * .. code-block:: bash
    392  *
    393  *	ue: 0
    394  *	ce: 1
    395  *
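 * A quick way to read one of these counters from a shell:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/umc_err_count
 *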
    396  */
    397 static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
    398 		struct device_attribute *attr, char *buf)
    399 {
    400 	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
    401 	struct ras_query_if info = {
    402 		.head = obj->head,
    403 	};
    404 
    405 	if (amdgpu_ras_error_query(obj->adev, &info))
    406 		return -EINVAL;
    407 
    408 	return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n",
    409 			"ue", info.ue_count,
    410 			"ce", info.ce_count);
    411 }
    412 
    413 #endif	/* __NetBSD__ */
    414 
    415 /* obj begin */
    416 
    417 #define get_obj(obj) do { (obj)->use++; } while (0)
    418 #define alive_obj(obj) ((obj)->use)
    419 
    420 static inline void put_obj(struct ras_manager *obj)
    421 {
    422 	if (obj && --obj->use == 0)
    423 		list_del(&obj->node);
    424 	if (obj && obj->use < 0) {
     425 		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", obj->head.name);
    426 	}
    427 }
    428 
    429 /* make one obj and return it. */
    430 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
    431 		struct ras_common_if *head)
    432 {
    433 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
    434 	struct ras_manager *obj;
    435 
    436 	if (!con)
    437 		return NULL;
    438 
    439 	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
    440 		return NULL;
    441 
    442 	obj = &con->objs[head->block];
     443 	/* already exists; return NULL instead of a second reference */
    444 	if (alive_obj(obj))
    445 		return NULL;
    446 
    447 	obj->head = *head;
    448 	obj->adev = adev;
    449 	list_add(&obj->node, &con->head);
    450 	get_obj(obj);
    451 
    452 	return obj;
    453 }
    454 
     455 /* return the obj matching head, or the first alive obj when head is NULL */
    456 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
    457 		struct ras_common_if *head)
    458 {
    459 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
    460 	struct ras_manager *obj;
    461 	int i;
    462 
    463 	if (!con)
    464 		return NULL;
    465 
    466 	if (head) {
    467 		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
    468 			return NULL;
    469 
    470 		obj = &con->objs[head->block];
    471 
    472 		if (alive_obj(obj)) {
    473 			WARN_ON(head->block != obj->head.block);
    474 			return obj;
    475 		}
    476 	} else {
    477 		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
    478 			obj = &con->objs[i];
    479 			if (alive_obj(obj)) {
    480 				WARN_ON(i != obj->head.block);
    481 				return obj;
    482 			}
    483 		}
    484 	}
    485 
    486 	return NULL;
    487 }
    488 /* obj end */
    489 
    490 /* feature ctl begin */
    491 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
    492 		struct ras_common_if *head)
    493 {
    494 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
    495 
    496 	return con->hw_supported & BIT(head->block);
    497 }
    498 
    499 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
    500 		struct ras_common_if *head)
    501 {
    502 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
    503 
    504 	return con->features & BIT(head->block);
    505 }
    506 
    507 /*
    508  * if obj is not created, then create one.
    509  * set feature enable flag.
    510  */
    511 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
    512 		struct ras_common_if *head, int enable)
    513 {
    514 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
    515 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
    516 
     517 	/* If the hardware does not support ras, then do not create the obj.
     518 	 * But if the hardware supports ras, we can create the obj.
     519 	 * The ras framework checks con->hw_supported to see if it needs to do
     520 	 * the corresponding initialization.
     521 	 * An IP checks con->support to see if it needs to disable ras.
     522 	 */
    523 	if (!amdgpu_ras_is_feature_allowed(adev, head))
    524 		return 0;
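	/* already in the requested state? then there is nothing to do */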
    525 	if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
    526 		return 0;
    527 
    528 	if (enable) {
    529 		if (!obj) {
    530 			obj = amdgpu_ras_create_obj(adev, head);
    531 			if (!obj)
    532 				return -EINVAL;
    533 		} else {
    534 			/* In case we create obj somewhere else */
    535 			get_obj(obj);
    536 		}
    537 		con->features |= BIT(head->block);
    538 	} else {
    539 		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
    540 			con->features &= ~BIT(head->block);
    541 			put_obj(obj);
    542 		}
    543 	}
    544 
    545 	return 0;
    546 }
    547 
    548 /* wrapper of psp_ras_enable_features */
    549 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
    550 		struct ras_common_if *head, bool enable)
    551 {
    552 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
    553 	union ta_ras_cmd_input info;
    554 	int ret;
    555 
    556 	if (!con)
    557 		return -EINVAL;
    558 
    559 	if (!enable) {
    560 		info.disable_features = (struct ta_ras_disable_features_input) {
    561 			.block_id =  amdgpu_ras_block_to_ta(head->block),
    562 			.error_type = amdgpu_ras_error_to_ta(head->type),
    563 		};
    564 	} else {
    565 		info.enable_features = (struct ta_ras_enable_features_input) {
    566 			.block_id =  amdgpu_ras_block_to_ta(head->block),
    567 			.error_type = amdgpu_ras_error_to_ta(head->type),
    568 		};
    569 	}
    570 
    571 	/* Do not enable if it is not allowed. */
    572 	WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
     573 	/* Are we already in the state we are going to set? */
    574 	if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
    575 		return 0;
    576 
    577 	if (!amdgpu_ras_intr_triggered()) {
    578 		ret = psp_ras_enable_features(&adev->psp, &info, enable);
    579 		if (ret) {
    580 			DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
    581 					enable ? "enable":"disable",
    582 					ras_block_str(head->block),
    583 					ret);
    584 			if (ret == TA_RAS_STATUS__RESET_NEEDED)
    585 				return -EAGAIN;
    586 			return -EINVAL;
    587 		}
    588 	}
    589 
    590 	/* setup the obj */
    591 	__amdgpu_ras_feature_enable(adev, head, enable);
    592 
    593 	return 0;
    594 }
    595 
    596 /* Only used in device probe stage and called only once. */
    597 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
    598 		struct ras_common_if *head, bool enable)
    599 {
    600 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
    601 	int ret;
    602 
    603 	if (!con)
    604 		return -EINVAL;
    605 
    606 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
    607 		if (enable) {
     608 			/* There is no harm in issuing a ras TA cmd regardless of
     609 			 * the current ras state.
     610 			 * If current state == target state, it will do nothing.
     611 			 * But sometimes it requests the driver to reset and repost
     612 			 * with error code -EAGAIN.
     613 			 */
    614 			ret = amdgpu_ras_feature_enable(adev, head, 1);
     615 			/* With an old ras TA, we might fail to enable ras.
     616 			 * Log it and just set up the object.
     617 			 * TODO: remove this workaround in the future.
     618 			 */
    619 			if (ret == -EINVAL) {
    620 				ret = __amdgpu_ras_feature_enable(adev, head, 1);
    621 				if (!ret)
    622 					DRM_INFO("RAS INFO: %s setup object\n",
    623 						ras_block_str(head->block));
    624 			}
    625 		} else {
     626 			/* set up the object, then issue a ras TA disable cmd. */
    627 			ret = __amdgpu_ras_feature_enable(adev, head, 1);
    628 			if (ret)
    629 				return ret;
    630 
    631 			ret = amdgpu_ras_feature_enable(adev, head, 0);
    632 		}
    633 	} else
    634 		ret = amdgpu_ras_feature_enable(adev, head, enable);
    635 
    636 	return ret;
    637 }
    638 
    639 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
    640 		bool bypass)
    641 {
    642 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
    643 	struct ras_manager *obj, *tmp;
    644 
    645 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
     646 		/* bypass psp,
     647 		 * i.e., just release the obj and the corresponding flags
     648 		 */
    649 		if (bypass) {
    650 			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
    651 				break;
    652 		} else {
    653 			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
    654 				break;
    655 		}
    656 	}
    657 
    658 	return con->features;
    659 }
    660 
    661 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
    662 		bool bypass)
    663 {
    664 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
    665 	int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
    666 	int i;
    667 	const enum amdgpu_ras_error_type default_ras_type =
    668 		AMDGPU_RAS_ERROR__NONE;
    669 
    670 	for (i = 0; i < ras_block_count; i++) {
    671 		struct ras_common_if head = {
    672 			.block = i,
    673 			.type = default_ras_type,
    674 			.sub_block_index = 0,
    675 		};
    676 		strcpy(head.name, ras_block_str(i));
    677 		if (bypass) {
    678 			/*
     679 			 * bypass psp; the vbios enabled ras for us,
     680 			 * so just create the obj
    681 			 */
    682 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
    683 				break;
    684 		} else {
    685 			if (amdgpu_ras_feature_enable(adev, &head, 1))
    686 				break;
    687 		}
    688 	}
    689 
    690 	return con->features;
    691 }
    692 /* feature ctl end */
    693 
    694 /* query/inject/cure begin */
    695 int amdgpu_ras_error_query(struct amdgpu_device *adev,
    696 		struct ras_query_if *info)
    697 {
    698 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
    699 	struct ras_err_data err_data = {0, 0, 0, NULL};
    700 	int i;
    701 
    702 	if (!obj)
    703 		return -EINVAL;
    704 
    705 	switch (info->head.block) {
    706 	case AMDGPU_RAS_BLOCK__UMC:
    707 		if (adev->umc.funcs->query_ras_error_count)
    708 			adev->umc.funcs->query_ras_error_count(adev, &err_data);
    709 		/* umc query_ras_error_address is also responsible for clearing
    710 		 * error status
    711 		 */
    712 		if (adev->umc.funcs->query_ras_error_address)
    713 			adev->umc.funcs->query_ras_error_address(adev, &err_data);
    714 		break;
    715 	case AMDGPU_RAS_BLOCK__SDMA:
    716 		if (adev->sdma.funcs->query_ras_error_count) {
    717 			for (i = 0; i < adev->sdma.num_instances; i++)
    718 				adev->sdma.funcs->query_ras_error_count(adev, i,
    719 									&err_data);
    720 		}
    721 		break;
    722 	case AMDGPU_RAS_BLOCK__GFX:
    723 		if (adev->gfx.funcs->query_ras_error_count)
    724 			adev->gfx.funcs->query_ras_error_count(adev, &err_data);
    725 		break;
    726 	case AMDGPU_RAS_BLOCK__MMHUB:
    727 		if (adev->mmhub.funcs->query_ras_error_count)
    728 			adev->mmhub.funcs->query_ras_error_count(adev, &err_data);
    729 		break;
    730 	case AMDGPU_RAS_BLOCK__PCIE_BIF:
    731 		if (adev->nbio.funcs->query_ras_error_count)
    732 			adev->nbio.funcs->query_ras_error_count(adev, &err_data);
    733 		break;
    734 	default:
    735 		break;
    736 	}
    737 
    738 	obj->err_data.ue_count += err_data.ue_count;
    739 	obj->err_data.ce_count += err_data.ce_count;
    740 
    741 	info->ue_count = obj->err_data.ue_count;
    742 	info->ce_count = obj->err_data.ce_count;
    743 
    744 	if (err_data.ce_count) {
    745 		dev_info(adev->dev, "%ld correctable errors detected in %s block\n",
    746 			 obj->err_data.ce_count, ras_block_str(info->head.block));
    747 	}
    748 	if (err_data.ue_count) {
    749 		dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n",
    750 			 obj->err_data.ue_count, ras_block_str(info->head.block));
    751 	}
    752 
    753 	return 0;
    754 }
    755 
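/*
 * Translate a local vram address into an XGMI-relative physical address by
 * adding the DF DRAM base of this device's die; the address is returned
 * unchanged when the DF callbacks are not available.
 */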
    756 uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr)
    757 {
    758 	uint32_t df_inst_id;
    759 
    760 	if ((!adev->df.funcs)                 ||
    761 	    (!adev->df.funcs->get_df_inst_id) ||
    762 	    (!adev->df.funcs->get_dram_base_addr))
    763 		return addr;
    764 
    765 	df_inst_id = adev->df.funcs->get_df_inst_id(adev);
    766 
    767 	return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id);
    768 }
    769 
    770 /* wrapper of psp_ras_trigger_error */
    771 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
    772 		struct ras_inject_if *info)
    773 {
    774 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
    775 	struct ta_ras_trigger_error_input block_info = {
    776 		.block_id =  amdgpu_ras_block_to_ta(info->head.block),
    777 		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
    778 		.sub_block_index = info->head.sub_block_index,
    779 		.address = info->address,
    780 		.value = info->value,
    781 	};
    782 	int ret = 0;
    783 
    784 	if (!obj)
    785 		return -EINVAL;
    786 
    787 	/* Calculate XGMI relative offset */
    788 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
    789 		block_info.address = get_xgmi_relative_phy_addr(adev,
    790 								block_info.address);
    791 	}
    792 
    793 	switch (info->head.block) {
    794 	case AMDGPU_RAS_BLOCK__GFX:
    795 		if (adev->gfx.funcs->ras_error_inject)
    796 			ret = adev->gfx.funcs->ras_error_inject(adev, info);
    797 		else
    798 			ret = -EINVAL;
    799 		break;
    800 	case AMDGPU_RAS_BLOCK__UMC:
    801 	case AMDGPU_RAS_BLOCK__MMHUB:
    802 	case AMDGPU_RAS_BLOCK__XGMI_WAFL:
    803 	case AMDGPU_RAS_BLOCK__PCIE_BIF:
    804 		ret = psp_ras_trigger_error(&adev->psp, &block_info);
    805 		break;
    806 	default:
    807 		DRM_INFO("%s error injection is not supported yet\n",
    808 			 ras_block_str(info->head.block));
    809 		ret = -EINVAL;
    810 	}
    811 
    812 	if (ret)
    813 		DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
    814 				ras_block_str(info->head.block),
    815 				ret);
    816 
    817 	return ret;
    818 }
    819 
    820 int amdgpu_ras_error_cure(struct amdgpu_device *adev,
    821 		struct ras_cure_if *info)
    822 {
    823 	/* psp fw has no cure interface for now. */
    824 	return 0;
    825 }
    826 
    827 /* get the total error counts on all IPs */
    828 unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
    829 		bool is_ce)
    830 {
    831 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
    832 	struct ras_manager *obj;
    833 	struct ras_err_data data = {0, 0};
    834 
    835 	if (!con)
    836 		return 0;
    837 
    838 	list_for_each_entry(obj, &con->head, node) {
    839 		struct ras_query_if info = {
    840 			.head = obj->head,
    841 		};
    842 
    843 		if (amdgpu_ras_error_query(adev, &info))
    844 			return 0;
    845 
    846 		data.ce_count += info.ce_count;
    847 		data.ue_count += info.ue_count;
    848 	}
    849 
    850 	return is_ce ? data.ce_count : data.ue_count;
    851 }
    852 /* query/inject/cure end */
    853 
    854 
    855 /* sysfs begin */
    856 
    857 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
    858 		struct ras_badpage **bps, unsigned int *count) __unused;
    859 
    860 #ifndef __NetBSD__		/* XXX amdgpu sysfs */
    861 
    862 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
    863 {
    864 	switch (flags) {
    865 	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
    866 		return "R";
    867 	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
    868 		return "P";
    869 	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
    870 	default:
    871 		return "F";
     872 	}
    873 }
    874 
    875 /**
    876  * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
    877  *
     878  * It allows the user to read the bad pages of vram on the gpu through
     879  * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
     880  *
     881  * It outputs multiple lines, and each line stands for one gpu page.
     882  *
     883  * The format of one line is below,
     884  * gpu pfn : gpu page size : flags
     885  *
     886  * gpu pfn and gpu page size are printed in hex format.
     887  * flags can be one of the characters below,
     888  *
     889  * R: reserved, this gpu page is reserved and not available for use.
     890  *
     891  * P: pending for reserve, this gpu page is marked as bad and will be
     892  * reserved in the next window of page_reserve.
     893  *
     894  * F: unable to reserve. this gpu page can't be reserved for some reason.
    895  *
    896  * Examples:
    897  *
    898  * .. code-block:: bash
    899  *
    900  *	0x00000001 : 0x00001000 : R
    901  *	0x00000002 : 0x00001000 : P
    902  *
    903  */
    904 
    905 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
    906 		struct kobject *kobj, struct bin_attribute *attr,
    907 		char *buf, loff_t ppos, size_t count)
    908 {
    909 	struct amdgpu_ras *con =
    910 		container_of(attr, struct amdgpu_ras, badpages_attr);
    911 	struct amdgpu_device *adev = con->adev;
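	/* each formatted line has a fixed width, so map the requested byte
	 * range [ppos, ppos + count) onto whole-line indices
	 */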
    912 	const unsigned int element_size =
    913 		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
    914 	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
    915 	unsigned int end = div64_ul(ppos + count - 1, element_size);
    916 	ssize_t s = 0;
    917 	struct ras_badpage *bps = NULL;
    918 	unsigned int bps_count = 0;
    919 
    920 	memset(buf, 0, count);
    921 
    922 	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
    923 		return 0;
    924 
    925 	for (; start < end && start < bps_count; start++)
    926 		s += scnprintf(&buf[s], element_size + 1,
    927 				"0x%08x : 0x%08x : %1s\n",
    928 				bps[start].bp,
    929 				bps[start].size,
    930 				amdgpu_ras_badpage_flags_str(bps[start].flags));
    931 
    932 	kfree(bps);
    933 
    934 	return s;
    935 }
    936 
    937 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
    938 		struct device_attribute *attr, char *buf)
    939 {
    940 	struct amdgpu_ras *con =
    941 		container_of(attr, struct amdgpu_ras, features_attr);
    942 
    943 	return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
    944 }
    945 
    946 static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
    947 {
    948 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
    949 	struct attribute *attrs[] = {
    950 		&con->features_attr.attr,
    951 		NULL
    952 	};
    953 	struct bin_attribute *bin_attrs[] = {
    954 		&con->badpages_attr,
    955 		NULL
    956 	};
    957 	struct attribute_group group = {
    958 		.name = "ras",
    959 		.attrs = attrs,
    960 		.bin_attrs = bin_attrs,
    961 	};
    962 
    963 	con->features_attr = (struct device_attribute) {
    964 		.attr = {
    965 			.name = "features",
    966 			.mode = S_IRUGO,
    967 		},
    968 			.show = amdgpu_ras_sysfs_features_read,
    969 	};
    970 
    971 	con->badpages_attr = (struct bin_attribute) {
    972 		.attr = {
    973 			.name = "gpu_vram_bad_pages",
    974 			.mode = S_IRUGO,
    975 		},
    976 		.size = 0,
    977 		.private = NULL,
    978 		.read = amdgpu_ras_sysfs_badpages_read,
    979 	};
    980 
    981 	sysfs_attr_init(attrs[0]);
    982 	sysfs_bin_attr_init(bin_attrs[0]);
    983 
    984 	return sysfs_create_group(&adev->dev->kobj, &group);
    985 }
    986 
    987 static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
    988 {
    989 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
    990 	struct attribute *attrs[] = {
    991 		&con->features_attr.attr,
    992 		NULL
    993 	};
    994 	struct bin_attribute *bin_attrs[] = {
    995 		&con->badpages_attr,
    996 		NULL
    997 	};
    998 	struct attribute_group group = {
    999 		.name = "ras",
   1000 		.attrs = attrs,
   1001 		.bin_attrs = bin_attrs,
   1002 	};
   1003 
   1004 	sysfs_remove_group(&adev->dev->kobj, &group);
   1005 
   1006 	return 0;
   1007 }
   1008 
   1009 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
   1010 		struct ras_fs_if *head)
   1011 {
   1012 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
   1013 
   1014 	if (!obj || obj->attr_inuse)
   1015 		return -EINVAL;
   1016 
   1017 	get_obj(obj);
   1018 
   1019 	memcpy(obj->fs_data.sysfs_name,
   1020 			head->sysfs_name,
   1021 			sizeof(obj->fs_data.sysfs_name));
   1022 
   1023 	obj->sysfs_attr = (struct device_attribute){
   1024 		.attr = {
   1025 			.name = obj->fs_data.sysfs_name,
   1026 			.mode = S_IRUGO,
   1027 		},
   1028 			.show = amdgpu_ras_sysfs_read,
   1029 	};
   1030 	sysfs_attr_init(&obj->sysfs_attr.attr);
   1031 
   1032 	if (sysfs_add_file_to_group(&adev->dev->kobj,
   1033 				&obj->sysfs_attr.attr,
   1034 				"ras")) {
   1035 		put_obj(obj);
   1036 		return -EINVAL;
   1037 	}
   1038 
   1039 	obj->attr_inuse = 1;
   1040 
   1041 	return 0;
   1042 }
   1043 
   1044 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
   1045 		struct ras_common_if *head)
   1046 {
   1047 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
   1048 
   1049 	if (!obj || !obj->attr_inuse)
   1050 		return -EINVAL;
   1051 
   1052 	sysfs_remove_file_from_group(&adev->dev->kobj,
   1053 				&obj->sysfs_attr.attr,
   1054 				"ras");
   1055 	obj->attr_inuse = 0;
   1056 	put_obj(obj);
   1057 
   1058 	return 0;
   1059 }
   1060 
   1061 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
   1062 {
   1063 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1064 	struct ras_manager *obj, *tmp;
   1065 
   1066 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
   1067 		amdgpu_ras_sysfs_remove(adev, &obj->head);
   1068 	}
   1069 
   1070 	amdgpu_ras_sysfs_remove_feature_node(adev);
   1071 
   1072 	return 0;
   1073 }
   1074 #endif	/* __NetBSD__ */
   1075 /* sysfs end */
   1076 
   1077 /**
   1078  * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
   1079  *
   1080  * Normally when there is an uncorrectable error, the driver will reset
   1081  * the GPU to recover.  However, in the event of an unrecoverable error,
    1082  * the driver provides an interface to reboot the system
    1083  * automatically.
   1084  *
   1085  * The following file in debugfs provides that interface:
   1086  * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
   1087  *
   1088  * Usage:
   1089  *
   1090  * .. code-block:: bash
   1091  *
   1092  *	echo true > .../ras/auto_reboot
   1093  *
   1094  */
   1095 /* debugfs begin */
   1096 #ifndef __NetBSD__		/* XXX amdgpu debugfs */
   1097 static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
   1098 {
   1099 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1100 	struct drm_minor *minor = adev->ddev->primary;
   1101 
   1102 	con->dir = debugfs_create_dir("ras", minor->debugfs_root);
   1103 	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
   1104 				adev, &amdgpu_ras_debugfs_ctrl_ops);
   1105 	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir,
   1106 				adev, &amdgpu_ras_debugfs_eeprom_ops);
   1107 
   1108 	/*
    1109 	 * After an uncorrectable error happens, GPU recovery will usually
    1110 	 * be scheduled. But due to the known problem of GPU recovery failing
    1111 	 * to bring the GPU back, the interface below provides a direct way
    1112 	 * for the user to reboot the system automatically when an
    1113 	 * ERREVENT_ATHUB_INTERRUPT is generated; the normal GPU recovery
    1114 	 * routine will never be called in that case.
   1115 	 */
   1116 	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir,
   1117 				&con->reboot);
   1118 }
   1119 
   1120 void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
   1121 		struct ras_fs_if *head)
   1122 {
   1123 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1124 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
   1125 
   1126 	if (!obj || obj->ent)
   1127 		return;
   1128 
   1129 	get_obj(obj);
   1130 
   1131 	memcpy(obj->fs_data.debugfs_name,
   1132 			head->debugfs_name,
   1133 			sizeof(obj->fs_data.debugfs_name));
   1134 
   1135 	obj->ent = debugfs_create_file(obj->fs_data.debugfs_name,
   1136 				       S_IWUGO | S_IRUGO, con->dir, obj,
   1137 				       &amdgpu_ras_debugfs_ops);
   1138 }
   1139 
   1140 void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
   1141 		struct ras_common_if *head)
   1142 {
   1143 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
   1144 
   1145 	if (!obj || !obj->ent)
   1146 		return;
   1147 
   1148 	debugfs_remove(obj->ent);
   1149 	obj->ent = NULL;
   1150 	put_obj(obj);
   1151 }
   1152 
   1153 static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
   1154 {
   1155 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1156 	struct ras_manager *obj, *tmp;
   1157 
   1158 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
   1159 		amdgpu_ras_debugfs_remove(adev, &obj->head);
   1160 	}
   1161 
   1162 	debugfs_remove_recursive(con->dir);
   1163 	con->dir = NULL;
   1164 }
   1165 #endif	/* __NetBSD__ */
   1166 /* debugfs end */
   1167 
   1168 /* ras fs */
   1169 
   1170 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
   1171 {
   1172 #ifndef __NetBSD__		/* XXX amdgpu debugfs sysfs */
   1173 	amdgpu_ras_sysfs_create_feature_node(adev);
   1174 	amdgpu_ras_debugfs_create_ctrl_node(adev);
   1175 #endif
   1176 
   1177 	return 0;
   1178 }
   1179 
   1180 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
   1181 {
   1182 #ifndef __NetBSD__		/* XXX amdgpu debugfs sysfs */
   1183 	amdgpu_ras_debugfs_remove_all(adev);
   1184 	amdgpu_ras_sysfs_remove_all(adev);
   1185 #endif
   1186 	return 0;
   1187 }
   1188 /* ras fs end */
   1189 
   1190 /* ih begin */
   1191 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
   1192 {
   1193 	struct ras_ih_data *data = &obj->ih_data;
   1194 	struct amdgpu_iv_entry entry;
   1195 	int ret;
   1196 	struct ras_err_data err_data = {0, 0, 0, NULL};
   1197 
   1198 	while (data->rptr != data->wptr) {
   1199 		rmb();
   1200 		memcpy(&entry, &data->ring[data->rptr],
   1201 				data->element_size);
   1202 
   1203 		wmb();
   1204 		data->rptr = (data->aligned_element_size +
   1205 				data->rptr) % data->ring_size;
   1206 
    1207 		/* Let the IP handle its data; maybe we need to get the output
    1208 		 * from the callback to update the error type/count, etc.
    1209 		 */
   1210 		if (data->cb) {
   1211 			ret = data->cb(obj->adev, &err_data, &entry);
    1212 			/* A ue will trigger an interrupt, and in that case
    1213 			 * we need to do a reset to recover the whole system.
    1214 			 * But leave the IP to do that recovery; here we just
    1215 			 * dispatch the error.
    1216 			 */
   1217 			if (ret == AMDGPU_RAS_SUCCESS) {
   1218 				/* these counts could be left as 0 if
    1219 				 * some blocks do not count error numbers
   1220 				 */
   1221 				obj->err_data.ue_count += err_data.ue_count;
   1222 				obj->err_data.ce_count += err_data.ce_count;
   1223 			}
   1224 		}
   1225 	}
   1226 }
   1227 
   1228 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
   1229 {
   1230 	struct ras_ih_data *data =
   1231 		container_of(work, struct ras_ih_data, ih_work);
   1232 	struct ras_manager *obj =
   1233 		container_of(data, struct ras_manager, ih_data);
   1234 
   1235 	amdgpu_ras_interrupt_handler(obj);
   1236 }
   1237 
   1238 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
   1239 		struct ras_dispatch_if *info)
   1240 {
    1241 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
    1242 	struct ras_ih_data *data;
    1243 
    1244 	if (!obj)
    1245 		return -EINVAL;
         	data = &obj->ih_data;
   1246 
   1247 	if (data->inuse == 0)
   1248 		return 0;
   1249 
   1250 	/* Might be overflow... */
   1251 	memcpy(&data->ring[data->wptr], info->entry,
   1252 			data->element_size);
   1253 
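	/* publish the copied entry before advancing wptr, so the handler
	 * never reads a partially written record
	 */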
   1254 	wmb();
   1255 	data->wptr = (data->aligned_element_size +
   1256 			data->wptr) % data->ring_size;
   1257 
   1258 	schedule_work(&data->ih_work);
   1259 
   1260 	return 0;
   1261 }
   1262 
   1263 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
   1264 		struct ras_ih_if *info)
   1265 {
   1266 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
   1267 	struct ras_ih_data *data;
   1268 
   1269 	if (!obj)
   1270 		return -EINVAL;
   1271 
   1272 	data = &obj->ih_data;
   1273 	if (data->inuse == 0)
   1274 		return 0;
   1275 
   1276 	cancel_work_sync(&data->ih_work);
   1277 
   1278 	kfree(data->ring);
   1279 	memset(data, 0, sizeof(*data));
   1280 	put_obj(obj);
   1281 
   1282 	return 0;
   1283 }
   1284 
   1285 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
   1286 		struct ras_ih_if *info)
   1287 {
   1288 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
   1289 	struct ras_ih_data *data;
   1290 
   1291 	if (!obj) {
    1292 		/* in case we register the IH before enabling the ras feature */
   1293 		obj = amdgpu_ras_create_obj(adev, &info->head);
   1294 		if (!obj)
   1295 			return -EINVAL;
   1296 	} else
   1297 		get_obj(obj);
   1298 
   1299 	data = &obj->ih_data;
    1300 	/* add the callback, etc. */
   1301 	*data = (struct ras_ih_data) {
   1302 		.inuse = 0,
   1303 		.cb = info->cb,
   1304 		.element_size = sizeof(struct amdgpu_iv_entry),
   1305 		.rptr = 0,
   1306 		.wptr = 0,
   1307 	};
   1308 
   1309 	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
   1310 
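	/* round each element up to 8 bytes so ring offsets stay aligned */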
   1311 	data->aligned_element_size = ALIGN(data->element_size, 8);
   1312 	/* the ring can store 64 iv entries. */
   1313 	data->ring_size = 64 * data->aligned_element_size;
   1314 	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
   1315 	if (!data->ring) {
   1316 		put_obj(obj);
   1317 		return -ENOMEM;
   1318 	}
   1319 
   1320 	/* IH is ready */
   1321 	data->inuse = 1;
   1322 
   1323 	return 0;
   1324 }
   1325 
   1326 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
   1327 {
   1328 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1329 	struct ras_manager *obj, *tmp;
   1330 
   1331 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
   1332 		struct ras_ih_if info = {
   1333 			.head = obj->head,
   1334 		};
   1335 		amdgpu_ras_interrupt_remove_handler(adev, &info);
   1336 	}
   1337 
   1338 	return 0;
   1339 }
   1340 /* ih end */
   1341 
   1342 /* recovery begin */
   1343 
   1344 /* return 0 on success.
    1345  * the caller needs to free bps.
   1346  */
   1347 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
   1348 		struct ras_badpage **bps, unsigned int *count)
   1349 {
   1350 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1351 	struct ras_err_handler_data *data;
   1352 	int i = 0;
   1353 	int ret = 0;
   1354 
   1355 	if (!con || !con->eh_data || !bps || !count)
   1356 		return -EINVAL;
   1357 
   1358 	mutex_lock(&con->recovery_lock);
   1359 	data = con->eh_data;
   1360 	if (!data || data->count == 0) {
   1361 		*bps = NULL;
   1362 		ret = -EINVAL;
   1363 		goto out;
   1364 	}
   1365 
   1366 	*bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
   1367 	if (!*bps) {
   1368 		ret = -ENOMEM;
   1369 		goto out;
   1370 	}
   1371 
   1372 	for (; i < data->count; i++) {
   1373 		(*bps)[i] = (struct ras_badpage){
   1374 			.bp = data->bps[i].retired_page,
   1375 			.size = AMDGPU_GPU_PAGE_SIZE,
   1376 			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
   1377 		};
   1378 
   1379 		if (data->last_reserved <= i)
   1380 			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
   1381 		else if (data->bps_bo[i] == NULL)
   1382 			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
   1383 	}
   1384 
   1385 	*count = data->count;
   1386 out:
   1387 	mutex_unlock(&con->recovery_lock);
   1388 	return ret;
   1389 }
   1390 
   1391 static void amdgpu_ras_do_recovery(struct work_struct *work)
   1392 {
   1393 	struct amdgpu_ras *ras =
   1394 		container_of(work, struct amdgpu_ras, recovery_work);
   1395 
   1396 	if (amdgpu_device_should_recover_gpu(ras->adev))
   1397 		amdgpu_device_gpu_recover(ras->adev, 0);
   1398 	atomic_set(&ras->in_recovery, 0);
   1399 }
   1400 
   1401 /* alloc/realloc bps array */
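/* capacity is rounded up to a multiple of 512 entries to amortize reallocations */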
   1402 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
   1403 		struct ras_err_handler_data *data, int pages)
   1404 {
   1405 	unsigned int old_space = data->count + data->space_left;
   1406 	unsigned int new_space = old_space + pages;
   1407 	unsigned int align_space = ALIGN(new_space, 512);
   1408 	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
   1409 	struct amdgpu_bo **bps_bo =
   1410 			kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL);
   1411 
   1412 	if (!bps || !bps_bo) {
   1413 		kfree(bps);
   1414 		kfree(bps_bo);
   1415 		return -ENOMEM;
   1416 	}
   1417 
   1418 	if (data->bps) {
   1419 		memcpy(bps, data->bps,
   1420 				data->count * sizeof(*data->bps));
   1421 		kfree(data->bps);
   1422 	}
   1423 	if (data->bps_bo) {
   1424 		memcpy(bps_bo, data->bps_bo,
   1425 				data->count * sizeof(*data->bps_bo));
   1426 		kfree(data->bps_bo);
   1427 	}
   1428 
   1429 	data->bps = bps;
   1430 	data->bps_bo = bps_bo;
   1431 	data->space_left += align_space - old_space;
   1432 	return 0;
   1433 }
   1434 
    1435 /* it deals with vram only. */
   1436 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
   1437 		struct eeprom_table_record *bps, int pages)
   1438 {
   1439 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1440 	struct ras_err_handler_data *data;
   1441 	int ret = 0;
   1442 
   1443 	if (!con || !con->eh_data || !bps || pages <= 0)
   1444 		return 0;
   1445 
   1446 	mutex_lock(&con->recovery_lock);
   1447 	data = con->eh_data;
   1448 	if (!data)
   1449 		goto out;
   1450 
   1451 	if (data->space_left <= pages)
   1452 		if (amdgpu_ras_realloc_eh_data_space(adev, data, pages)) {
   1453 			ret = -ENOMEM;
   1454 			goto out;
   1455 		}
   1456 
   1457 	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
   1458 	data->count += pages;
   1459 	data->space_left -= pages;
   1460 
   1461 out:
   1462 	mutex_unlock(&con->recovery_lock);
   1463 
   1464 	return ret;
   1465 }
   1466 
   1467 /*
    1468  * write the error record array to eeprom; the function should be
    1469  * protected by recovery_lock
   1470  */
   1471 static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
   1472 {
   1473 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1474 	struct ras_err_handler_data *data;
   1475 	struct amdgpu_ras_eeprom_control *control;
   1476 	int save_count;
   1477 
   1478 	if (!con || !con->eh_data)
   1479 		return 0;
   1480 
   1481 	control = &con->eeprom_control;
   1482 	data = con->eh_data;
   1483 	save_count = data->count - control->num_recs;
   1484 	/* only new entries are saved */
   1485 	if (save_count > 0)
   1486 		if (amdgpu_ras_eeprom_process_recods(control,
   1487 							&data->bps[control->num_recs],
   1488 							true,
   1489 							save_count)) {
   1490 			DRM_ERROR("Failed to save EEPROM table data!");
   1491 			return -EIO;
   1492 		}
   1493 
   1494 	return 0;
   1495 }
   1496 
   1497 /*
   1498  * read error record array in eeprom and reserve enough space for
   1499  * storing new bad pages
   1500  */
   1501 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
   1502 {
   1503 	struct amdgpu_ras_eeprom_control *control =
   1504 					&adev->psp.ras.ras->eeprom_control;
   1505 	struct eeprom_table_record *bps = NULL;
   1506 	int ret = 0;
   1507 
   1508 	/* no bad page record, skip eeprom access */
   1509 	if (!control->num_recs)
   1510 		return ret;
   1511 
   1512 	bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
   1513 	if (!bps)
   1514 		return -ENOMEM;
   1515 
   1516 	if (amdgpu_ras_eeprom_process_recods(control, bps, false,
   1517 		control->num_recs)) {
   1518 		DRM_ERROR("Failed to load EEPROM table records!");
   1519 		ret = -EIO;
   1520 		goto out;
   1521 	}
   1522 
   1523 	ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs);
   1524 
   1525 out:
   1526 	kfree(bps);
   1527 	return ret;
   1528 }
   1529 
   1530 /*
    1531  * check if an address belongs to a bad page
   1532  *
   1533  * Note: this check is only for umc block
   1534  */
   1535 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
   1536 				uint64_t addr)
   1537 {
   1538 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1539 	struct ras_err_handler_data *data;
   1540 	int i;
   1541 	bool ret = false;
   1542 
   1543 	if (!con || !con->eh_data)
   1544 		return ret;
   1545 
   1546 	mutex_lock(&con->recovery_lock);
   1547 	data = con->eh_data;
   1548 	if (!data)
   1549 		goto out;
   1550 
   1551 	addr >>= AMDGPU_GPU_PAGE_SHIFT;
   1552 	for (i = 0; i < data->count; i++)
   1553 		if (addr == data->bps[i].retired_page) {
   1554 			ret = true;
   1555 			goto out;
   1556 		}
   1557 
   1558 out:
   1559 	mutex_unlock(&con->recovery_lock);
   1560 	return ret;
   1561 }
   1562 
   1563 /* called in gpu recovery/init */
   1564 int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
   1565 {
   1566 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1567 	struct ras_err_handler_data *data;
   1568 	uint64_t bp;
   1569 	struct amdgpu_bo *bo = NULL;
   1570 	int i, ret = 0;
   1571 
   1572 	if (!con || !con->eh_data)
   1573 		return 0;
   1574 
   1575 	mutex_lock(&con->recovery_lock);
   1576 	data = con->eh_data;
   1577 	if (!data)
   1578 		goto out;
   1579 	/* reserve vram at driver post stage. */
   1580 	for (i = data->last_reserved; i < data->count; i++) {
   1581 		bp = data->bps[i].retired_page;
   1582 
    1583 		/* There are two cases where a reserve error should be ignored:
   1584 		 * 1) a ras bad page has been allocated (used by someone);
   1585 		 * 2) a ras bad page has been reserved (duplicate error injection
   1586 		 *    for one page);
   1587 		 */
   1588 		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
   1589 					       AMDGPU_GPU_PAGE_SIZE,
   1590 					       AMDGPU_GEM_DOMAIN_VRAM,
   1591 					       &bo, NULL))
   1592 			DRM_WARN("RAS WARN: reserve vram for retired page %"PRIx64" fail\n", bp);
   1593 
   1594 		data->bps_bo[i] = bo;
   1595 		data->last_reserved = i + 1;
   1596 		bo = NULL;
   1597 	}
   1598 
    1599 	/* continue to save bad pages to eeprom even if reserve_vram fails */
   1600 	ret = amdgpu_ras_save_bad_pages(adev);
   1601 out:
   1602 	mutex_unlock(&con->recovery_lock);
   1603 	return ret;
   1604 }
   1605 
    1606 /* called when the driver unloads */
   1607 static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
   1608 {
   1609 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1610 	struct ras_err_handler_data *data;
   1611 	struct amdgpu_bo *bo;
   1612 	int i;
   1613 
   1614 	if (!con || !con->eh_data)
   1615 		return 0;
   1616 
   1617 	mutex_lock(&con->recovery_lock);
   1618 	data = con->eh_data;
   1619 	if (!data)
   1620 		goto out;
   1621 
   1622 	for (i = data->last_reserved - 1; i >= 0; i--) {
   1623 		bo = data->bps_bo[i];
   1624 
   1625 		amdgpu_bo_free_kernel(&bo, NULL, NULL);
   1626 
   1627 		data->bps_bo[i] = bo;
   1628 		data->last_reserved = i;
   1629 	}
   1630 out:
   1631 	mutex_unlock(&con->recovery_lock);
   1632 	return 0;
   1633 }
   1634 
   1635 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
   1636 {
   1637 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1638 	struct ras_err_handler_data **data;
   1639 	int ret;
   1640 
   1641 	if (con)
   1642 		data = &con->eh_data;
   1643 	else
   1644 		return 0;
   1645 
   1646 	*data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
   1647 	if (!*data) {
   1648 		ret = -ENOMEM;
   1649 		goto out;
   1650 	}
   1651 
   1652 	mutex_init(&con->recovery_lock);
   1653 	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
   1654 	atomic_set(&con->in_recovery, 0);
   1655 	con->adev = adev;
   1656 
   1657 	ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
   1658 	if (ret)
   1659 		goto free;
   1660 
   1661 	if (con->eeprom_control.num_recs) {
   1662 		ret = amdgpu_ras_load_bad_pages(adev);
   1663 		if (ret)
   1664 			goto free;
   1665 		ret = amdgpu_ras_reserve_bad_pages(adev);
   1666 		if (ret)
   1667 			goto release;
   1668 	}
   1669 
   1670 	return 0;
   1671 
   1672 release:
   1673 	amdgpu_ras_release_bad_pages(adev);
   1674 free:
   1675 	kfree((*data)->bps);
   1676 	kfree((*data)->bps_bo);
   1677 	kfree(*data);
   1678 	con->eh_data = NULL;
   1679 out:
   1680 	DRM_WARN("Failed to initialize ras recovery!\n");
   1681 
   1682 	return ret;
   1683 }
   1684 
   1685 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
   1686 {
   1687 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1688 	struct ras_err_handler_data *data = con->eh_data;
   1689 
   1690 	/* recovery_init failed to init it, fini is useless */
   1691 	if (!data)
   1692 		return 0;
   1693 
   1694 	cancel_work_sync(&con->recovery_work);
   1695 	amdgpu_ras_release_bad_pages(adev);
   1696 
   1697 	mutex_lock(&con->recovery_lock);
   1698 	con->eh_data = NULL;
   1699 	kfree(data->bps);
   1700 	kfree(data->bps_bo);
   1701 	kfree(data);
   1702 	mutex_unlock(&con->recovery_lock);
   1703 
   1704 	return 0;
   1705 }
   1706 /* recovery end */
   1707 
    1708 /* return 0 if ras will reset the gpu and repost. */
   1709 int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
   1710 		unsigned int block)
   1711 {
   1712 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
   1713 
   1714 	if (!ras)
   1715 		return -EINVAL;
   1716 
   1717 	ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET;
   1718 	return 0;
   1719 }
   1720 
   1721 /*
    1722  * check the hardware's ras ability, which will be saved in hw_supported.
    1723  * if the hardware does not support ras, we can skip some ras initialization
    1724  * and forbid some ras operations from the IPs.
    1725  * if software itself, say a boot parameter, limits the ras ability, we
    1726  * still need to allow the IPs to do some limited operations, like disable.
    1727  * In such a case, we have to initialize ras as normal, but need to check
    1728  * in each function whether the operation is allowed.
   1729  */
   1730 static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
   1731 		uint32_t *hw_supported, uint32_t *supported)
   1732 {
   1733 	*hw_supported = 0;
   1734 	*supported = 0;
   1735 
   1736 	if (amdgpu_sriov_vf(adev) ||
   1737 	    (adev->asic_type != CHIP_VEGA20 &&
   1738 	     adev->asic_type != CHIP_ARCTURUS))
   1739 		return;
   1740 
   1741 	if (adev->is_atom_fw &&
   1742 			(amdgpu_atomfirmware_mem_ecc_supported(adev) ||
   1743 			 amdgpu_atomfirmware_sram_ecc_supported(adev)))
   1744 		*hw_supported = AMDGPU_RAS_BLOCK_MASK;
   1745 
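         	/* Worked example (sketch): with amdgpu_ras_enable != 0 and
         	 * amdgpu_ras_mask = 0x1, only bit 0 (the umc block) survives
         	 * the mask, so *supported ends up as *hw_supported & 0x1. */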
   1746 	*supported = amdgpu_ras_enable == 0 ?
   1747 				0 : *hw_supported & amdgpu_ras_mask;
   1748 }
   1749 
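         /*
          * Allocate the per-device ras context, probe what the hardware
          * supports, and wire up the nbio ras interrupts and fs nodes.
          */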
   1750 int amdgpu_ras_init(struct amdgpu_device *adev)
   1751 {
   1752 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1753 	int r;
   1754 
   1755 	if (con)
   1756 		return 0;
   1757 
   1758 	con = kmalloc(sizeof(struct amdgpu_ras) +
   1759 			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT,
   1760 			GFP_KERNEL|__GFP_ZERO);
   1761 	if (!con)
   1762 		return -ENOMEM;
   1763 
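         	/* The ras_manager objs live in the same allocation, directly
         	 * after the amdgpu_ras context; see the sizeof() sum above. */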
   1764 	con->objs = (struct ras_manager *)(con + 1);
   1765 
   1766 	amdgpu_ras_set_context(adev, con);
   1767 
   1768 	amdgpu_ras_check_supported(adev, &con->hw_supported,
   1769 			&con->supported);
   1770 	if (!con->hw_supported) {
   1771 		amdgpu_ras_set_context(adev, NULL);
   1772 		kfree(con);
   1773 		return 0;
   1774 	}
   1775 
   1776 	con->features = 0;
   1777 	INIT_LIST_HEAD(&con->head);
    1778 	/* Might need to get this flag from vbios. */
   1779 	con->flags = RAS_DEFAULT_FLAGS;
   1780 
   1781 	if (adev->nbio.funcs->init_ras_controller_interrupt) {
   1782 		r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
   1783 		if (r)
   1784 			return r;
   1785 	}
   1786 
   1787 	if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
   1788 		r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
   1789 		if (r)
   1790 			return r;
   1791 	}
   1792 
   1793 	amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
   1794 
   1795 	if (amdgpu_ras_fs_init(adev))
   1796 		goto fs_out;
   1797 
   1798 	DRM_INFO("RAS INFO: ras initialized successfully, "
   1799 			"hardware ability[%x] ras_mask[%x]\n",
   1800 			con->hw_supported, con->supported);
   1801 	return 0;
   1802 fs_out:
   1803 	amdgpu_ras_set_context(adev, NULL);
   1804 	kfree(con);
   1805 
   1806 	return -EINVAL;
   1807 }
   1808 
    1809 /* helper function to handle common stuff in the IP late init phase */
   1810 int amdgpu_ras_late_init(struct amdgpu_device *adev,
   1811 			 struct ras_common_if *ras_block,
   1812 			 struct ras_fs_if *fs_info,
   1813 			 struct ras_ih_if *ih_info)
   1814 {
   1815 	int r;
   1816 
   1817 	/* disable RAS feature per IP block if it is not supported */
   1818 	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
   1819 		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
   1820 		return 0;
   1821 	}
   1822 
   1823 	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
   1824 	if (r) {
   1825 		if (r == -EAGAIN) {
    1826 			/* request a gpu reset; this will run again */
   1827 			amdgpu_ras_request_reset_on_boot(adev,
   1828 					ras_block->block);
   1829 			return 0;
   1830 		} else if (adev->in_suspend || adev->in_gpu_reset) {
    1831 			/* in the resume phase, if we fail to enable ras,
    1832 			 * clean up all ras fs nodes and disable ras */
   1833 			goto cleanup;
   1834 		} else
   1835 			return r;
   1836 	}
   1837 
   1838 	/* in resume phase, no need to create ras fs node */
   1839 	if (adev->in_suspend || adev->in_gpu_reset)
   1840 		return 0;
   1841 
   1842 	if (ih_info->cb) {
   1843 		r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
   1844 		if (r)
   1845 			goto interrupt;
   1846 	}
   1847 
   1848 	amdgpu_ras_debugfs_create(adev, fs_info);
   1849 
   1850 	r = amdgpu_ras_sysfs_create(adev, fs_info);
   1851 	if (r)
   1852 		goto sysfs;
   1853 
   1854 	return 0;
   1855 cleanup:
   1856 	amdgpu_ras_sysfs_remove(adev, ras_block);
   1857 sysfs:
   1858 	amdgpu_ras_debugfs_remove(adev, ras_block);
   1859 	if (ih_info->cb)
   1860 		amdgpu_ras_interrupt_remove_handler(adev, ih_info);
   1861 interrupt:
   1862 	amdgpu_ras_feature_enable(adev, ras_block, 0);
   1863 	return r;
   1864 }
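
         /*
          * A minimal usage sketch (hypothetical values, not taken verbatim
          * from any IP block; the .head fields are omitted): an IP's
          * late_init hook might call the helper above roughly like this:
          *
          *	struct ras_fs_if fs_info = {
          *		.sysfs_name = "gfx_err_count",
          *		.debugfs_name = "gfx_err_inject",
          *	};
          *	struct ras_ih_if ih_info = { .cb = gfx_process_ras_data_cb };
          *	r = amdgpu_ras_late_init(adev, ras_block, &fs_info, &ih_info);
          */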
   1865 
    1866 /* helper function to remove ras fs nodes and the interrupt handler */
   1867 void amdgpu_ras_late_fini(struct amdgpu_device *adev,
   1868 			  struct ras_common_if *ras_block,
   1869 			  struct ras_ih_if *ih_info)
   1870 {
   1871 	if (!ras_block || !ih_info)
   1872 		return;
   1873 
   1874 	amdgpu_ras_sysfs_remove(adev, ras_block);
   1875 	amdgpu_ras_debugfs_remove(adev, ras_block);
   1876 	if (ih_info->cb)
    1877 		amdgpu_ras_interrupt_remove_handler(adev, ih_info);
   1878 	amdgpu_ras_feature_enable(adev, ras_block, 0);
   1879 }
   1880 
    1881 /* Do some init work after IP late init, as a dependence.
    1882  * It runs in the resume/gpu reset/boot-up cases.
    1883  */
   1884 void amdgpu_ras_resume(struct amdgpu_device *adev)
   1885 {
   1886 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1887 	struct ras_manager *obj, *tmp;
   1888 
   1889 	if (!con)
   1890 		return;
   1891 
   1892 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
    1893 		/* Set up all other IPs which are not implemented. There is
    1894 		 * a subtlety here: an IP's actual ras error type should be
    1895 		 * MULTI_UNCORRECTABLE, but since the driver does not handle
    1896 		 * it, ERROR_NONE makes sense anyway.
   1897 		 */
   1898 		amdgpu_ras_enable_all_features(adev, 1);
   1899 
    1900 		/* We enable ras on all hw_supported blocks, but a boot
    1901 		 * parameter might disable some of them, and one or more IPs
    1902 		 * may not be implemented yet. So we disable those on their behalf.
   1903 		 */
   1904 		list_for_each_entry_safe(obj, tmp, &con->head, node) {
   1905 			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
   1906 				amdgpu_ras_feature_enable(adev, &obj->head, 0);
    1907 				/* there should not be any reference. */
   1908 				WARN_ON(alive_obj(obj));
   1909 			}
   1910 		}
   1911 	}
   1912 
   1913 	if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) {
   1914 		con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET;
    1915 		/* Set up the ras obj state as disabled, for the
    1916 		 * init_by_vbios case.
    1917 		 * If we want to enable ras, just enable it in the normal way.
    1918 		 * If we want to disable it, we need to set up the ras obj as
    1919 		 * enabled, then issue another TA disable cmd.
    1920 		 * See feature_enable_on_boot.
   1921 		 */
   1922 		amdgpu_ras_disable_all_features(adev, 1);
   1923 		amdgpu_ras_reset_gpu(adev);
   1924 	}
   1925 }
   1926 
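         /* Disable all ras features on the way into suspend. */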
   1927 void amdgpu_ras_suspend(struct amdgpu_device *adev)
   1928 {
   1929 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1930 
   1931 	if (!con)
   1932 		return;
   1933 
   1934 	amdgpu_ras_disable_all_features(adev, 0);
   1935 	/* Make sure all ras objects are disabled. */
   1936 	if (con->features)
   1937 		amdgpu_ras_disable_all_features(adev, 1);
   1938 }
   1939 
    1940 /* do some fini work before IP fini, as a dependence */
   1941 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
   1942 {
   1943 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1944 
   1945 	if (!con)
   1946 		return 0;
   1947 
    1948 	/* Need to disable ras on all IPs here before IP [hw/sw]fini */
   1949 	amdgpu_ras_disable_all_features(adev, 0);
   1950 	amdgpu_ras_recovery_fini(adev);
   1951 	return 0;
   1952 }
   1953 
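         /* Final teardown: remove the fs nodes and interrupt handlers and
          * free the ras context. */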
   1954 int amdgpu_ras_fini(struct amdgpu_device *adev)
   1955 {
   1956 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
   1957 
   1958 	if (!con)
   1959 		return 0;
   1960 
   1961 	amdgpu_ras_fs_fini(adev);
   1962 	amdgpu_ras_interrupt_remove_all(adev);
   1963 
   1964 	WARN(con->features, "Feature mask is not cleared");
   1965 
   1966 	if (con->features)
   1967 		amdgpu_ras_disable_all_features(adev, 1);
   1968 
   1969 	amdgpu_ras_set_context(adev, NULL);
   1970 	kfree(con);
   1971 
   1972 	return 0;
   1973 }
   1974 
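         /* Called on a fatal ras interrupt: schedule a gpu reset at most
          * once, guarded by the amdgpu_ras_in_intr flag. */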
   1975 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
   1976 {
   1977 	uint32_t hw_supported, supported;
   1978 
   1979 	amdgpu_ras_check_supported(adev, &hw_supported, &supported);
   1980 	if (!hw_supported)
   1981 		return;
   1982 
   1983 	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
   1984 		DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
   1985 
   1986 		amdgpu_ras_reset_gpu(adev);
   1987 	}
   1988 }
   1989