/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>
#include <linux/list_sort.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
#include "nbif_v6_3_1.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"
#include "amdgpu_psp.h"

#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif

static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
	"mca",
	"vcn",
	"jpeg",
	"ih",
	"mpio",
	"mmsch",
};

const char *ras_mca_block_string[] = {
	"mca_mp0",
	"mca_mp1",
	"mca_mpio",
	"mca_iohc",
};

struct amdgpu_ras_block_list {
	/* ras block link */
	struct list_head node;

	struct amdgpu_ras_block_object *ras_obj;
};

const char *get_ras_block_str(struct ras_common_if *ras_block)
{
	if (!ras_block)
		return "NULL";

	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
	    ras_block->block >= ARRAY_SIZE(ras_block_string))
		return "OUT OF RANGE";

	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
		return ras_mca_block_string[ras_block->sub_block_index];

	return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")

#define ras_err_str(i) (ras_error_string[ffs(i)])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER		(100 * 1024 * 1024ULL)

#define MAX_UMC_POISON_POLLING_TIME_ASYNC  300  //ms

#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100  //ms

#define MAX_FLUSH_RETIRE_DWORK_TIMES  100

enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
				uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
				uint64_t addr);
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
	int num_gpu;
};
static struct mce_notifier_adev_list mce_adev_list;
#endif

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
	if (adev && amdgpu_ras_get_context(adev))
		amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
	if (adev && amdgpu_ras_get_context(adev))
		return amdgpu_ras_get_context(adev)->error_query_ready;

	return false;
}

static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
	struct ras_err_data err_data;
	struct eeprom_table_record err_rec;
	int ret;

	if ((address >= adev->gmc.mc_vram_size) ||
	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
		dev_warn(adev->dev,
			 "RAS WARN: input address 0x%llx is invalid.\n",
			 address);
		return -EINVAL;
	}

	if (amdgpu_ras_check_bad_page(adev, address)) {
		dev_warn(adev->dev,
			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
			 address);
		return 0;
	}

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
	err_data.err_addr = &err_rec;
	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					 err_data.err_addr_cnt, false);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

	amdgpu_ras_error_data_fini(&err_data);

	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
	dev_warn(adev->dev, "Clear EEPROM:\n");
	dev_warn(adev->dev, "		echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

	return 0;
}

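/*
 * The direct page reservation above backs the "retire_page" command
 * handled by amdgpu_ras_debugfs_ctrl_parse_data() below, so a page can
 * be retired by hand through debugfs; for example (hex and decimal
 * addresses are both accepted):
 *
 *	echo "retire_page 0x1000" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 */
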
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);
	if (*pos >= s)
		return 0;

	s -= *pos;
	s = min_t(u64, s, size);


	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.write = NULL,
	.llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_string[i]) == 0)
			return 0;
	}
	return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9] = "ue";
	int op = -1;
	int block_id;
	uint32_t sub_block;
	u64 address, value;
	/* default value is 0 if the mask is not set by user */
	u32 instance_mask = 0;

	if (*pos)
		return -EINVAL;
	*pos = size;

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (strstr(str, "retire_page") != NULL)
		op = 3;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but commands are not matched. */
		return -EINVAL;

	if (op != -1) {
		if (op == 3) {
			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
			    sscanf(str, "%*s %llu", &address) != 1)
				return -EINVAL;

			data->op = op;
			data->inject.address = address;

			return 0;
		}

		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		/* only ue, ce and poison errors are supported */
		if (!memcmp("ue", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		else if (!memcmp("ce", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		else if (!memcmp("poison", err, 6))
			data->head.type = AMDGPU_RAS_ERROR__POISON;
		else
			return -EINVAL;

		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
				   &sub_block, &address, &value) != 3 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu",
				   &sub_block, &address, &value) != 3)
				return -EINVAL;
			data->head.sub_block_index = sub_block;
			data->inject.address = address;
			data->inject.value = value;
			data->inject.instance_mask = instance_mask;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}

static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
				struct ras_debug_if *data)
{
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
	uint32_t mask, inst_mask = data->inject.instance_mask;

	/* no need to set instance mask if there is only one instance */
	if (num_xcc <= 1 && inst_mask) {
		data->inject.instance_mask = 0;
		dev_dbg(adev->dev,
			"RAS inject mask(0x%x) isn't supported and force it to 0.\n",
			inst_mask);

		return;
	}

	switch (data->head.block) {
	case AMDGPU_RAS_BLOCK__GFX:
		mask = GENMASK(num_xcc - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__SDMA:
		mask = GENMASK(adev->sdma.num_instances - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__VCN:
	case AMDGPU_RAS_BLOCK__JPEG:
		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
		break;
	default:
		mask = inst_mask;
		break;
	}

	/* remove invalid bits in instance mask */
	data->inject.instance_mask &= mask;
	if (inst_mask != data->inject.instance_mask)
		dev_dbg(adev->dev,
			"Adjust RAS inject mask 0x%x to 0x%x\n",
			inst_mask, data->inject.instance_mask);
}

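/*
 * For example, on a part with four GFX XCCs, GENMASK(3, 0) == 0xf, so a
 * user-supplied instance mask of 0xff for a gfx inject is trimmed to 0xf
 * and the adjustment is reported via dev_dbg.
 */
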
/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members, they are block, type, sub_block_index, name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, say, GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has three more members than head, they are address, value and mask.
 * As their names indicate, inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
 *
 * From shell
 *
 * .. code-block:: bash
 *
 *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where N is the card which you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *	see ras_block_string[] for details
 *
 * The error type is one of: ue, ce and poison where,
 *	ue is multi-uncorrectable
 *	ce is single-correctable
 *	poison is poison
 *
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers, leading 0x is optional.
 * The mask is the instance mask; it is optional, and its default value is 0x1.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
 * .. note::
 *	Operations are only allowed on blocks which are supported.
 *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *	to see which blocks support RAS on a particular asic.
 *
 */
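
/*
 * A poison-type inject follows the same grammar as ue/ce; for example,
 * assuming the umc block supports poison on the given asic:
 *
 *	echo inject umc poison 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 */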
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
					     const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev, "RAS WARN: error injection "
				"currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return ret;

	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
		if (!ret)
			return size;
		else
			return ret;
	}

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size &&
		    adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			dev_warn(adev->dev, "RAS WARN: input address "
					"0x%llx is invalid.",
					data.inject.address);
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
					"already been marked as bad!\n",
					data.inject.address);
			break;
		}

		amdgpu_ras_instance_mask_check(adev, &data);

		/* data.inject.address is offset instead of absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experience ECC errors in vram. This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
					       const char __user *buf,
					       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
		&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (!ret) {
		/* Something was written to EEPROM.
		 */
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return ret;
	}
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *	ue: 0
 *	ce: 1
 *
 */
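
/*
 * For example, the UMC counts on the first card can be read with:
 *
 *	cat /sys/class/drm/card0/device/ras/umc_err_count
 */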
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
				"ce", info.ce_count, "de", info.de_count);
	else
		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
				"ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && (--obj->use == 0)) {
		list_del(&obj->node);
		amdgpu_ras_error_data_fini(&obj->err_data);
	}

	if (obj && (obj->use < 0))
		DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head));
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
			return NULL;

		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
	} else
		obj = &con->objs[head->block];

	/* already exist. return obj? */
	if (alive_obj(obj))
		return NULL;

	if (amdgpu_ras_error_data_init(&obj->err_data))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
				return NULL;

			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
		} else
			obj = &con->objs[head->block];

		if (alive_obj(obj))
			return obj;
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj))
				return obj;
		}
	}

	return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If hardware does not support ras, then do not create obj.
	 * But if hardware supports ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs to do
	 * the corresponding initialization.
	 * The IP checks con->support to see if it needs to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we create obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input *info;
	int ret;

	if (!con)
		return -EINVAL;

	/* For non-gfx ip, do not enable ras feature if it is not allowed */
	/* For gfx ip, regardless of feature support status, */
	/* Force issue enable or disable ras feature commands */
	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	/* Only enable gfx ras feature from host side */
	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_sriov_vf(adev) &&
	    !amdgpu_ras_intr_triggered()) {
		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		if (!enable) {
			info->disable_features = (struct ta_ras_disable_features_input) {
				.block_id =  amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		} else {
			info->enable_features = (struct ta_ras_enable_features_input) {
				.block_id =  amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		}

		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
				enable ? "enable":"disable",
				get_ras_block_str(head),
				amdgpu_ras_is_poison_mode_supported(adev), ret);
			kfree(info);
			return ret;
		}

		kfree(info);
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);

	return 0;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm to issue a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing
			 * But sometimes it requests driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With old ras TA, we might fail to enable ras.
			 * Log it and just setup the object.
			 * TODO need remove this WA in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
				if (!ret)
					dev_info(adev->dev,
						"RAS INFO: %s setup object\n",
						get_ras_block_str(head));
			}
		} else {
			/* setup the object then issue a ras TA disable cmd.*/
			ret = __amdgpu_ras_feature_enable(adev, head, 1);
			if (ret)
				return ret;

			/* gfx block ras disable cmd must send to ras-ta */
			if (head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features |= BIT(head->block);

			ret = amdgpu_ras_feature_enable(adev, head, 0);

			/* clean gfx block ras features flag */
			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features &= ~BIT(head->block);
		}
	} else
		ret = amdgpu_ras_feature_enable(adev, head, enable);

	return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		/* bypass psp.
		 * aka just release the obj and corresponding flags
		 */
		if (bypass) {
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		}
	}

	return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int i;
	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = i,
			.type = default_ras_type,
			.sub_block_index = 0,
		};

		if (i == AMDGPU_RAS_BLOCK__MCA)
			continue;

		if (bypass) {
			/*
			 * bypass psp. vbios enables ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = AMDGPU_RAS_BLOCK__MCA,
			.type = default_ras_type,
			.sub_block_index = i,
		};

		if (bypass) {
			/*
			 * bypass psp. vbios enables ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
		enum amdgpu_ras_block block)
{
	if (!block_obj)
		return -EINVAL;

	if (block_obj->ras_comm.block == block)
		return 0;

	return -EINVAL;
}

static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
					enum amdgpu_ras_block block, uint32_t sub_block_index)
{
	struct amdgpu_ras_block_list *node, *tmp;
	struct amdgpu_ras_block_object *obj;

	if (block >= AMDGPU_RAS_BLOCK__LAST)
		return NULL;

	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
		if (!node->ras_obj) {
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
			continue;
		}

		obj = node->ras_obj;
		if (obj->ras_block_match) {
			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
				return obj;
		} else {
			if (amdgpu_ras_block_match_default(obj, block) == 0)
				return obj;
		}
	}

	return NULL;
}

static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/*
	 * choose the right query method according to
	 * whether the smu supports querying error information
	 */
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
	if (ret == -EOPNOTSUPP) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
	} else if (!ret) {
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address)
			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
	}
}

static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
					      struct ras_manager *ras_mgr,
					      struct ras_err_data *err_data,
					      struct ras_query_context *qctx,
					      const char *blk_name,
					      bool is_ue,
					      bool is_de)
{
	struct amdgpu_smuio_mcm_config_info *mcm_info;
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;
	u64 event_id = qctx->evid.event_id;

	if (is_ue) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			if (err_info->ue_count) {
				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
					      "%lld new uncorrectable hardware errors detected in %s block\n",
					      mcm_info->socket_id,
					      mcm_info->die_id,
					      err_info->ue_count,
					      blk_name);
			}
		}

		for_each_ras_error(err_node, &ras_mgr->err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
				      "%lld uncorrectable hardware errors detected in total in %s block\n",
				      mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
		}

	} else {
		if (is_de) {
			for_each_ras_error(err_node, err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				if (err_info->de_count) {
					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
						      "%lld new deferred hardware errors detected in %s block\n",
						      mcm_info->socket_id,
						      mcm_info->die_id,
						      err_info->de_count,
						      blk_name);
				}
			}

			for_each_ras_error(err_node, &ras_mgr->err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
					      "%lld deferred hardware errors detected in total in %s block\n",
					      mcm_info->socket_id, mcm_info->die_id,
					      err_info->de_count, blk_name);
			}
		} else {
			if (adev->debug_disable_ce_logs)
				return;

			for_each_ras_error(err_node, err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				if (err_info->ce_count) {
					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
						      "%lld new correctable hardware errors detected in %s block\n",
						      mcm_info->socket_id,
						      mcm_info->die_id,
						      err_info->ce_count,
						      blk_name);
				}
			}

			for_each_ras_error(err_node, &ras_mgr->err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
					      "%lld correctable hardware errors detected in total in %s block\n",
					      mcm_info->socket_id, mcm_info->die_id,
					      err_info->ce_count, blk_name);
			}
		}
	}
}

static inline bool err_data_has_source_info(struct ras_err_data *data)
{
	return !list_empty(&data->err_node_list);
}

static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
					     struct ras_query_if *query_if,
					     struct ras_err_data *err_data,
					     struct ras_query_context *qctx)
{
	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
	const char *blk_name = get_ras_block_str(&query_if->head);
	u64 event_id = qctx->evid.event_id;

	if (err_data->ce_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
							  blk_name, false, false);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
				      "%ld correctable hardware errors "
				      "detected in %s block\n",
				      adev->smuio.funcs->get_socket_id(adev),
				      adev->smuio.funcs->get_die_id(adev),
				      ras_mgr->err_data.ce_count,
				      blk_name);
		} else {
			RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
				      "detected in %s block\n",
				      ras_mgr->err_data.ce_count,
				      blk_name);
		}
	}

	if (err_data->ue_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
							  blk_name, true, false);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
				      "%ld uncorrectable hardware errors "
				      "detected in %s block\n",
				      adev->smuio.funcs->get_socket_id(adev),
				      adev->smuio.funcs->get_die_id(adev),
				      ras_mgr->err_data.ue_count,
				      blk_name);
		} else {
			RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
				      "detected in %s block\n",
				      ras_mgr->err_data.ue_count,
				      blk_name);
		}
	}

	if (err_data->de_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
							  blk_name, false, true);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
				      "%ld deferred hardware errors "
				      "detected in %s block\n",
				      adev->smuio.funcs->get_socket_id(adev),
				      adev->smuio.funcs->get_die_id(adev),
				      ras_mgr->err_data.de_count,
				      blk_name);
		} else {
			RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
				      "detected in %s block\n",
				      ras_mgr->err_data.de_count,
				      blk_name);
		}
	}
}

static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev,
						  struct ras_query_if *query_if,
						  struct ras_err_data *err_data,
						  struct ras_query_context *qctx)
{
	unsigned long new_ue, new_ce, new_de;
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head);
	const char *blk_name = get_ras_block_str(&query_if->head);
	u64 event_id = qctx->evid.event_id;

	new_ce = err_data->ce_count - obj->err_data.ce_count;
	new_ue = err_data->ue_count - obj->err_data.ue_count;
	new_de = err_data->de_count - obj->err_data.de_count;

	if (new_ce) {
		RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors "
			      "detected in %s block\n",
			      new_ce,
			      blk_name);
	}

	if (new_ue) {
		RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors "
			      "detected in %s block\n",
			      new_ue,
			      blk_name);
	}

	if (new_de) {
		RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors "
			      "detected in %s block\n",
			      new_de,
			      blk_name);
	}
}

static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
{
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;

	if (err_data_has_source_info(err_data)) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			amdgpu_ras_error_statistic_de_count(&obj->err_data,
					&err_info->mcm_info, err_info->de_count);
			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
					&err_info->mcm_info, err_info->ce_count);
			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
					&err_info->mcm_info, err_info->ue_count);
		}
	} else {
		/* for the legacy asic path which doesn't have error source info */
		obj->err_data.ue_count += err_data->ue_count;
		obj->err_data.ce_count += err_data->ce_count;
		obj->err_data.de_count += err_data->de_count;
	}
}

static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj,
							     struct ras_err_data *err_data)
{
	/* Host reports absolute counts */
	obj->err_data.ue_count = err_data->ue_count;
	obj->err_data.ce_count = err_data->ce_count;
	obj->err_data.de_count = err_data->de_count;
}

static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
	struct ras_common_if head;

	memset(&head, 0, sizeof(head));
	head.block = blk;

	return amdgpu_ras_find_obj(adev, &head);
}

int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
			const struct aca_info *aca_info, void *data)
{
	struct ras_manager *obj;

	/* in resume phase, no need to create aca fs node */
	if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
		return 0;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
}

int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
	struct ras_manager *obj;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	amdgpu_aca_remove_handle(&obj->aca_handle);

	return 0;
}

static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
					 enum aca_error_type type, struct ras_err_data *err_data,
					 struct ras_query_context *qctx)
{
	struct ras_manager *obj;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
}

ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
				  struct aca_handle *handle, char *buf, void *data)
{
	struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
			  "ce", info.ce_count, "de", info.de_count);
}

static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
						struct ras_query_if *info,
						struct ras_err_data *err_data,
						struct ras_query_context *qctx,
						unsigned int error_query_mode)
{
	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
	struct amdgpu_ras_block_object *block_obj = NULL;
	int ret;

	if (blk == AMDGPU_RAS_BLOCK_COUNT)
		return -EINVAL;

	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
		return -EINVAL;

	if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
		return amdgpu_virt_req_ras_err_count(adev, blk, err_data);
	} else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
		if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
			amdgpu_ras_get_ecc_info(adev, err_data);
		} else {
			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
			if (!block_obj || !block_obj->hw_ops) {
				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
					     get_ras_block_str(&info->head));
				return -EINVAL;
			}

			if (block_obj->hw_ops->query_ras_error_count)
				block_obj->hw_ops->query_ras_error_count(adev, err_data);

			if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
			    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
			    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
				if (block_obj->hw_ops->query_ras_error_status)
					block_obj->hw_ops->query_ras_error_status(adev);
			}
		}
	} else {
		if (amdgpu_aca_is_enabled(adev)) {
			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
			if (ret)
				return ret;

			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
			if (ret)
				return ret;

			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
			if (ret)
				return ret;
		} else {
			/* FIXME: add code to check return value later */
			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
		}
	}

	return 0;
}

/* query/inject/cure begin */
static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
						    struct ras_query_if *info,
						    enum ras_event_type type)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data;
	struct ras_query_context qctx;
	unsigned int error_query_mode;
	int ret;

	if (!obj)
		return -EINVAL;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
		return -EINVAL;

	memset(&qctx, 0, sizeof(qctx));
	qctx.evid.type = type;
	qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type);

	if (!down_read_trylock(&adev->reset_domain->sem)) {
		ret = -EIO;
		goto out_fini_err_data;
	}

	ret = amdgpu_ras_query_error_status_helper(adev, info,
						   &err_data,
						   &qctx,
						   error_query_mode);
	up_read(&adev->reset_domain->sem);
	if (ret)
		goto out_fini_err_data;

	if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
		amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
		amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
	} else {
		/* Host provides absolute error counts. First generate the report
		 * using the previous VF internal count against the new host count.
		 * Then update the VF internal count.
		 */
		amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx);
		amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data);
	}

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;
	info->de_count = obj->err_data.de_count;

out_fini_err_data:
	amdgpu_ras_error_data_fini(&err_data);

	return ret;
}

int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
{
	return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
}

int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     ras_block_str(block));
		return -EOPNOTSUPP;
	}

	if (!amdgpu_ras_is_supported(adev, block) ||
	    !amdgpu_ras_get_aca_debug_mode(adev))
		return -EOPNOTSUPP;

	if (amdgpu_sriov_vf(adev))
		return -EOPNOTSUPP;

	/* skip ras error reset in gpu reset */
	if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
	    ((smu_funcs && smu_funcs->set_debug_mode) ||
	     (mca_funcs && mca_funcs->mca_set_debug_mode)))
		return -EOPNOTSUPP;

	if (block_obj->hw_ops->reset_ras_error_count)
		block_obj->hw_ops->reset_ras_error_count(adev);

	return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

	if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
		return 0;

	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
		if (block_obj->hw_ops->reset_ras_error_status)
			block_obj->hw_ops->reset_ras_error_status(adev);
	}

	return 0;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id =  amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = -EINVAL;
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
							info->head.block,
							info->head.sub_block_index);

	/* inject on guest isn't allowed, return success directly */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!obj)
		return -EINVAL;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
		return -EINVAL;
	}

	/* Calculate XGMI relative offset */
	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
		block_info.address =
			amdgpu_xgmi_get_relative_phy_addr(adev,
							  block_info.address);
	}

	if (block_obj->hw_ops->ras_error_inject) {
		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
		else /* Special ras_error_inject is defined (e.g: xgmi) */
			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
						info->instance_mask);
	} else {
		/* default path */
		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
	}

	if (ret)
		dev_err(adev->dev, "ras inject %s failed %d\n",
			get_ras_block_str(&info->head), ret);

	return ret;
}

/**
 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
 * @query_info: pointer to ras_query_if
 *
 * Return 0 for query success or do nothing, otherwise return an error
 * on failures
 */
static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
					       unsigned long *ce_count,
					       unsigned long *ue_count,
					       struct ras_query_if *query_info)
{
	int ret;

	if (!query_info)
		/* do nothing if query_info is not specified */
		return 0;

	ret = amdgpu_ras_query_error_status(adev, query_info);
	if (ret)
		return ret;

	*ce_count += query_info->ce_count;
	*ue_count += query_info->ue_count;

	/* some hardware/IP supports read to clear
	 * no need to explicitly reset the err status after the query call */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
			dev_warn(adev->dev,
				 "Failed to reset error counter and error status\n");
	}

	return 0;
}

1632/**
1633 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
bbe04dec
IB
1634 * @adev: pointer to AMD GPU device
1635 * @ce_count: pointer to an integer to be set to the count of correctible errors.
1636 * @ue_count: pointer to an integer to be set to the count of uncorrectible
4d9f771e 1637 * errors.
4a1c9a44
HZ
1638 * @query_info: pointer to ras_query_if if the query request is only for
1639 * specific ip block; if info is NULL, then the qurey request is for
1640 * all the ip blocks that support query ras error counters/status
4d9f771e
LT
1641 *
1642 * If set, @ce_count or @ue_count, count and return the corresponding
1643 * error counts in those integer pointers. Return 0 if the device
1644 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
1645 */
1646int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1647 unsigned long *ce_count,
4a1c9a44
HZ
1648 unsigned long *ue_count,
1649 struct ras_query_if *query_info)
c030f2e4 1650{
1651 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1652 struct ras_manager *obj;
a46751fb 1653 unsigned long ce, ue;
4a1c9a44 1654 int ret;
c030f2e4 1655
8ab0d6f0 1656 if (!adev->ras_enabled || !con)
4d9f771e
LT
1657 return -EOPNOTSUPP;
1658
1659 /* Don't count since no reporting.
1660 */
1661 if (!ce_count && !ue_count)
1662 return 0;
c030f2e4 1663
a46751fb
LT
1664 ce = 0;
1665 ue = 0;
4a1c9a44
HZ
1666 if (!query_info) {
1667 /* query all the ip blocks that support ras query interface */
1668 list_for_each_entry(obj, &con->head, node) {
1669 struct ras_query_if info = {
1670 .head = obj->head,
1671 };
c030f2e4 1672
4a1c9a44 1673 ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
2a460963 1674 }
1675 } else {
1676 /* query specific ip block */
1677 ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
c030f2e4 1678 }
1679
1680 if (ret)
1681 return ret;
1682
1683 if (ce_count)
1684 *ce_count = ce;
1685
1686 if (ue_count)
1687 *ue_count = ue;
1688
1689 return 0;
c030f2e4 1690}
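
/*
 * Editor's sketch, not upstream code: a minimal example of how a caller
 * might use amdgpu_ras_query_error_count() above to log device-wide
 * totals. Passing a NULL query_info queries every IP block that supports
 * RAS counters. The function name is hypothetical.
 */
static void __maybe_unused example_log_ras_totals(struct amdgpu_device *adev)
{
	unsigned long ce = 0, ue = 0;

	/* returns -EOPNOTSUPP when RAS is not enabled on this device */
	if (!amdgpu_ras_query_error_count(adev, &ce, &ue, NULL))
		dev_info(adev->dev, "RAS totals: ce=%lu ue=%lu\n", ce, ue);
}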
1691/* query/inject/cure end */
1692
1693
1694/* sysfs begin */
1695
466b1793 1696static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1697 struct ras_badpage **bps, unsigned int *count);
1698
1699static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1700{
1701 switch (flags) {
52dd95f2 1702 case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
466b1793 1703 return "R";
52dd95f2 1704 case AMDGPU_RAS_RETIRE_PAGE_PENDING:
466b1793 1705 return "P";
52dd95f2 1706 case AMDGPU_RAS_RETIRE_PAGE_FAULT:
466b1793 1707 default:
1708 return "F";
aec576f9 1709 }
466b1793 1710}
1711
1712/**
1713 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
466b1793 1714 *
1715 * It allows user to read the bad pages of vram on the gpu through
1716 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1717 *
1718 * It outputs multiple lines, and each line stands for one gpu page.
1719 *
1720 * The format of one line is below,
1721 * gpu pfn : gpu page size : flags
1722 *
1723 * gpu pfn and gpu page size are printed in hex format.
1724 * flags can be one of the characters below,
f77c7109 1725 *
466b1793 1726 * R: reserved, this gpu page is reserved and not available for use.
f77c7109 1727 *
466b1793 1728 * P: pending for reserve, this gpu page is marked as bad and will be reserved
1729 * in the next window of page_reserve.
1730 *
466b1793 1731 * F: unable to reserve, this gpu page cannot be reserved for some reason.
1732 *
1733 * Examples:
1734 *
1735 * .. code-block:: bash
1736 *
1737 * 0x00000001 : 0x00001000 : R
1738 * 0x00000002 : 0x00001000 : P
1739 *
466b1793 1740 */
1741
1742static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
2d0f5001 1743 struct kobject *kobj, const struct bin_attribute *attr,
466b1793 1744 char *buf, loff_t ppos, size_t count)
1745{
1746 struct amdgpu_ras *con =
1747 container_of(attr, struct amdgpu_ras, badpages_attr);
1748 struct amdgpu_device *adev = con->adev;
1749 const unsigned int element_size =
1750 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1751 unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1752 unsigned int end = div64_ul(ppos + count - 1, element_size);
466b1793 1753 ssize_t s = 0;
1754 struct ras_badpage *bps = NULL;
1755 unsigned int bps_count = 0;
1756
1757 memset(buf, 0, count);
1758
1759 if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1760 return 0;
1761
1762 for (; start < end && start < bps_count; start++)
1763 s += scnprintf(&buf[s], element_size + 1,
1764 "0x%08x : 0x%08x : %1s\n",
1765 bps[start].bp,
1766 bps[start].size,
1767 amdgpu_ras_badpage_flags_str(bps[start].flags));
1768
1769 kfree(bps);
1770
1771 return s;
1772}
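
/*
 * Editor's worked example for the fixed-width record math above: each
 * record is sizeof("0xabcdabcd : 0x12345678 : R\n") - 1 = 28 bytes, so a
 * read at ppos = 40 with count = 50 yields start = (40 + 27) / 28 = 2 and
 * end = (40 + 50 - 1) / 28 = 3, emitting only the record at index 2;
 * reads are thus rounded to whole bad-page records.
 */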
1773
c030f2e4 1774static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1775 struct device_attribute *attr, char *buf)
1776{
1777 struct amdgpu_ras *con =
1778 container_of(attr, struct amdgpu_ras, features_attr);
c030f2e4 1779
2cffcb66 1780 return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
c030f2e4 1781}
1782
1783static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
1784 struct device_attribute *attr, char *buf)
1785{
1786 struct amdgpu_ras *con =
1787 container_of(attr, struct amdgpu_ras, version_attr);
1788 return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
1789}
1790
1791static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
1792 struct device_attribute *attr, char *buf)
1793{
1794 struct amdgpu_ras *con =
1795 container_of(attr, struct amdgpu_ras, schema_attr);
1796 return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
1797}
1798
1799static struct {
1800 enum ras_event_type type;
1801 const char *name;
1802} dump_event[] = {
1803 {RAS_EVENT_TYPE_FATAL, "Fatal Error"},
1804 {RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"},
1805 {RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"},
1806};
1807
1808static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev,
1809 struct device_attribute *attr, char *buf)
1810{
1811 struct amdgpu_ras *con =
1812 container_of(attr, struct amdgpu_ras, event_state_attr);
1813 struct ras_event_manager *event_mgr = con->event_mgr;
1814 struct ras_event_state *event_state;
1815 int i, size = 0;
1816
1817 if (!event_mgr)
1818 return -EINVAL;
1819
1820 size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno));
1821 for (i = 0; i < ARRAY_SIZE(dump_event); i++) {
1822 event_state = &event_mgr->event_state[dump_event[i].type];
1823 size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n",
1824 dump_event[i].name,
1825 atomic64_read(&event_state->count),
1826 event_state->last_seqno);
1827 }
1828
1829 return (ssize_t)size;
1830}
1831
1832static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1833{
1834 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1835
1836 if (adev->dev->kobj.sd)
1837 sysfs_remove_file_from_group(&adev->dev->kobj,
1838 &con->badpages_attr.attr,
1839 RAS_FS_NAME);
1840}
1841
625e5f38 1842static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
c030f2e4 1843{
1844 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1845 struct attribute *attrs[] = {
1846 &con->features_attr.attr,
1847 &con->version_attr.attr,
1848 &con->schema_attr.attr,
59f488be 1849 &con->event_state_attr.attr,
c030f2e4 1850 NULL
1851 };
1852 struct attribute_group group = {
eb0c3cd4 1853 .name = RAS_FS_NAME,
c030f2e4 1854 .attrs = attrs,
1855 };
1856
1857 if (adev->dev->kobj.sd)
1858 sysfs_remove_group(&adev->dev->kobj, &group);
c030f2e4 1859
1860 return 0;
1861}
1862
1863int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
9252d33d 1864 struct ras_common_if *head)
c030f2e4 1865{
9252d33d 1866 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
c030f2e4 1867
1868 if (amdgpu_aca_is_enabled(adev))
1869 return 0;
1870
c030f2e4 1871 if (!obj || obj->attr_inuse)
1872 return -EINVAL;
1873
1874 if (amdgpu_sriov_vf(adev) && !amdgpu_virt_ras_telemetry_block_en(adev, head->block))
1875 return 0;
1876
c030f2e4 1877 get_obj(obj);
1878
9252d33d 1879 snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
1880 "%s_err_count", head->name);
c030f2e4 1881
1882 obj->sysfs_attr = (struct device_attribute){
1883 .attr = {
1884 .name = obj->fs_data.sysfs_name,
1885 .mode = S_IRUGO,
1886 },
1887 .show = amdgpu_ras_sysfs_read,
1888 };
163def43 1889 sysfs_attr_init(&obj->sysfs_attr.attr);
c030f2e4 1890
1891 if (sysfs_add_file_to_group(&adev->dev->kobj,
1892 &obj->sysfs_attr.attr,
eb0c3cd4 1893 RAS_FS_NAME)) {
c030f2e4 1894 put_obj(obj);
1895 return -EINVAL;
1896 }
1897
1898 obj->attr_inuse = 1;
1899
1900 return 0;
1901}
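
/*
 * Editor's sketch, not upstream code: how an IP block might expose its
 * <block>_err_count node through amdgpu_ras_sysfs_create() above. The
 * ras_common_if values and the function name are assumptions made for
 * illustration only.
 */
static int __maybe_unused example_expose_gfx_err_count(struct amdgpu_device *adev)
{
	struct ras_common_if head = {
		.block = AMDGPU_RAS_BLOCK__GFX,
		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
		.name = "gfx",
	};

	/* creates ras/gfx_err_count in the device's sysfs directory */
	return amdgpu_ras_sysfs_create(adev, &head);
}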
1902
1903int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1904 struct ras_common_if *head)
1905{
1906 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1907
1908 if (amdgpu_aca_is_enabled(adev))
1909 return 0;
1910
c030f2e4 1911 if (!obj || !obj->attr_inuse)
1912 return -EINVAL;
1913
1914 if (adev->dev->kobj.sd)
1915 sysfs_remove_file_from_group(&adev->dev->kobj,
c030f2e4 1916 &obj->sysfs_attr.attr,
eb0c3cd4 1917 RAS_FS_NAME);
c030f2e4 1918 obj->attr_inuse = 0;
1919 put_obj(obj);
1920
1921 return 0;
1922}
1923
1924static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1925{
1926 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1927 struct ras_manager *obj, *tmp;
1928
1929 list_for_each_entry_safe(obj, tmp, &con->head, node) {
1930 amdgpu_ras_sysfs_remove(adev, &obj->head);
1931 }
1932
1933 if (amdgpu_bad_page_threshold != 0)
1934 amdgpu_ras_sysfs_remove_bad_page_node(adev);
1935
625e5f38 1936 amdgpu_ras_sysfs_remove_dev_attr_node(adev);
c030f2e4 1937
1938 return 0;
1939}
1940/* sysfs end */
1941
1942/**
1943 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1944 *
1945 * Normally when there is an uncorrectable error, the driver will reset
1946 * the GPU to recover. However, if the error is unrecoverable, the
1947 * driver provides an interface to reboot the system automatically
1948 * when it occurs.
1949 *
1950 * The following file in debugfs provides that interface:
1951 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1952 *
1953 * Usage:
1954 *
1955 * .. code-block:: bash
1956 *
1957 * echo true > .../ras/auto_reboot
1958 *
1959 */
c030f2e4 1960/* debugfs begin */
ea1b8c9b 1961static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
36ea1bd2 1962{
1963 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
740f42a2 1964 struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
1965 struct drm_minor *minor = adev_to_drm(adev)->primary;
1966 struct dentry *dir;
36ea1bd2 1967
1968 dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1969 debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1970 &amdgpu_ras_debugfs_ctrl_ops);
1971 debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1972 &amdgpu_ras_debugfs_eeprom_ops);
1973 debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1974 &con->bad_page_cnt_threshold);
740f42a2 1975 debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
1976 debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1977 debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1978 debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1979 &amdgpu_ras_debugfs_eeprom_size_ops);
1980 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1981 S_IRUGO, dir, adev,
1982 &amdgpu_ras_debugfs_eeprom_table_ops);
1983 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1984
1985 /*
1986	 * After an uncorrectable error happens, GPU recovery is usually
1987	 * scheduled. But because GPU recovery is known to sometimes fail
1988	 * to bring the GPU back, the interface below gives the user a
1989	 * direct way to reboot the system automatically when an
1990	 * ERREVENT_ATHUB_INTERRUPT is generated; the normal GPU recovery
1991	 * routine is never called in that case.
1992 */
88293c03 1993 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1994
1995 /*
1996	 * The user can set this so that the hardware error count registers
1997	 * of the RAS IPs are not cleaned up during RAS recovery.
1998 */
1999 debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
2000 &con->disable_ras_err_cnt_harvest);
2001 return dir;
36ea1bd2 2002}
2003
cedf7884 2004static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
2005 struct ras_fs_if *head,
2006 struct dentry *dir)
c030f2e4 2007{
c030f2e4 2008 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
c030f2e4 2009
88293c03 2010 if (!obj || !dir)
450f30ea 2011 return;
c030f2e4 2012
2013 get_obj(obj);
2014
2015 memcpy(obj->fs_data.debugfs_name,
2016 head->debugfs_name,
2017 sizeof(obj->fs_data.debugfs_name));
2018
2019 debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
2020 obj, &amdgpu_ras_debugfs_ops);
c030f2e4 2021}
2022
2023static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
2024{
2025 bool ret;
2026
2027 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2028 case IP_VERSION(13, 0, 6):
9a826c4a 2029 case IP_VERSION(13, 0, 12):
2030 case IP_VERSION(13, 0, 14):
2031 ret = true;
2032 break;
2033 default:
2034 ret = false;
2035 break;
2036 }
2037
2038 return ret;
2039}
2040
2041void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
2042{
2043 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
88293c03 2044 struct dentry *dir;
c1509f3f 2045 struct ras_manager *obj;
2046 struct ras_fs_if fs_info;
2047
2048 /*
2049 * it won't be called in resume path, no need to check
2050 * suspend and gpu reset status
2051 */
cedf7884 2052 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
2053 return;
2054
88293c03 2055 dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
f9317014 2056
c1509f3f 2057 list_for_each_entry(obj, &con->head, node) {
2058 if (amdgpu_ras_is_supported(adev, obj->head.block) &&
2059 (obj->attr_inuse == 1)) {
2060 sprintf(fs_info.debugfs_name, "%s_err_inject",
640ae42e 2061 get_ras_block_str(&obj->head));
f9317014 2062 fs_info.head = obj->head;
88293c03 2063 amdgpu_ras_debugfs_create(adev, &fs_info, dir);
2064 }
2065 }
4051844c 2066
2067 if (amdgpu_ras_aca_is_supported(adev)) {
2068 if (amdgpu_aca_is_enabled(adev))
2069 amdgpu_aca_smu_debugfs_init(adev, dir);
2070 else
2071 amdgpu_mca_smu_debugfs_init(adev, dir);
2072 }
2073}
2074
c030f2e4 2075/* debugfs end */
2076
2077/* ras fs */
2078static const BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
2079 amdgpu_ras_sysfs_badpages_read, NULL, 0);
2080static DEVICE_ATTR(features, S_IRUGO,
2081 amdgpu_ras_sysfs_features_read, NULL);
2082static DEVICE_ATTR(version, 0444,
2083 amdgpu_ras_sysfs_version_show, NULL);
2084static DEVICE_ATTR(schema, 0444,
2085 amdgpu_ras_sysfs_schema_show, NULL);
2086static DEVICE_ATTR(event_state, 0444,
2087 amdgpu_ras_sysfs_event_state_show, NULL);
c030f2e4 2088static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
2089{
2090 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2091 struct attribute_group group = {
2092 .name = RAS_FS_NAME,
2093 };
2094 struct attribute *attrs[] = {
2095 &con->features_attr.attr,
2096 &con->version_attr.attr,
2097 &con->schema_attr.attr,
59f488be 2098 &con->event_state_attr.attr,
2099 NULL
2100 };
2d0f5001 2101 const struct bin_attribute *bin_attrs[] = {
2102 NULL,
2103 NULL,
2104 };
a069a9eb 2105 int r;
c030f2e4 2106
2107 group.attrs = attrs;
2108
2109 /* add features entry */
2110 con->features_attr = dev_attr_features;
2111 sysfs_attr_init(attrs[0]);
2112
2113 /* add version entry */
2114 con->version_attr = dev_attr_version;
2115 sysfs_attr_init(attrs[1]);
2116
2117 /* add schema entry */
2118 con->schema_attr = dev_attr_schema;
2119 sysfs_attr_init(attrs[2]);
2120
2121 /* add event_state entry */
2122 con->event_state_attr = dev_attr_event_state;
2123 sysfs_attr_init(attrs[3]);
2124
2125 if (amdgpu_bad_page_threshold != 0) {
2126 /* add bad_page_features entry */
c3d4d45d 2127 con->badpages_attr = bin_attr_gpu_vram_bad_pages;
2d0f5001 2128 sysfs_bin_attr_init(&con->badpages_attr);
c3d4d45d 2129 bin_attrs[0] = &con->badpages_attr;
fb506e31 2130 group.bin_attrs = bin_attrs;
2131 }
2132
2133 r = sysfs_create_group(&adev->dev->kobj, &group);
2134 if (r)
2135 dev_err(adev->dev, "Failed to create RAS sysfs group!");
f848159b 2136
c030f2e4 2137 return 0;
2138}
2139
2140static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
2141{
2142 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2143 struct ras_manager *con_obj, *ip_obj, *tmp;
2144
2145 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2146 list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
2147 ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
2148 if (ip_obj)
2149 put_obj(ip_obj);
2150 }
2151 }
2152
c030f2e4 2153 amdgpu_ras_sysfs_remove_all(adev);
2154 return 0;
2155}
2156/* ras fs end */
2157
2158/* ih begin */
2159
2160/* For the hardware that cannot enable bif ring for both ras_controller_irq
2161 * and ras_err_event_athub_irq ih cookies, the driver has to poll status
2162 * register to check whether the interrupt is triggered or not, and properly
2163 * ack the interrupt if it is there
2164 */
2165void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
2166{
950d6425 2167 /* Fatal error events are handled on host side */
8eba7205 2168 if (amdgpu_sriov_vf(adev))
b3c76814 2169 return;
0105725e 2170 /*
2171 * If the current interrupt is caused by a non-fatal RAS error, skip
2172 * check for fatal error. For fatal errors, FED status of all devices
2173 * in XGMI hive gets set when the first device gets fatal error
2174 * interrupt. The error gets propagated to other devices as well, so
2175 * make sure to ack the interrupt regardless of FED status.
2176 */
2177 if (!amdgpu_ras_get_fed_status(adev) &&
2178 amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
2179 return;
2180
2181 if (adev->nbio.ras &&
2182 adev->nbio.ras->handle_ras_controller_intr_no_bifring)
2183 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
2184
2185 if (adev->nbio.ras &&
2186 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
2187 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
2188}
2189
2190static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
2191 struct amdgpu_iv_entry *entry)
2192{
b63ac5d3 2193 bool poison_stat = false;
66f87949 2194 struct amdgpu_device *adev = obj->adev;
2195 struct amdgpu_ras_block_object *block_obj =
2196 amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
5f7697bb 2197 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2198 enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
2199 u64 event_id;
2200 int ret;
66f87949 2201
5f7697bb 2202 if (!block_obj || !con)
b63ac5d3 2203 return;
66f87949 2204
2205 ret = amdgpu_ras_mark_ras_event(adev, type);
2206 if (ret)
2207 return;
2208
e1ee2111 2209 amdgpu_ras_set_err_poison(adev, block_obj->ras_comm.block);
2210 /* both query_poison_status and handle_poison_consumption are optional,
2211	 * but at least one of them should be implemented if we need a poison
2212	 * consumption handler
2213 */
ac7b25d9 2214 if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
2215 poison_stat = block_obj->hw_ops->query_poison_status(adev);
2216 if (!poison_stat) {
2217 /* Not poison consumption interrupt, no need to handle it */
2218 dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
2219 block_obj->ras_comm.name);
2220
2221 return;
2222 }
2223 }
2224
2fc46e0b 2225 amdgpu_umc_poison_handler(adev, obj->head.block, 0);
b63ac5d3 2226
ac7b25d9 2227 if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
2228 poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
2229
2230	/* gpu reset is the fallback for failed and default cases.
2231	 * For the RMA case, amdgpu_umc_poison_handler will handle the gpu reset.
2232 */
792be2e2 2233 if (poison_stat && !amdgpu_ras_is_rma(adev)) {
2234 event_id = amdgpu_ras_acquire_event_id(adev, type);
2235 RAS_EVENT_LOG(adev, event_id,
2236 "GPU reset for %s RAS poison consumption is issued!\n",
2237 block_obj->ras_comm.name);
66f87949 2238 amdgpu_ras_reset_gpu(adev);
b63ac5d3 2239 }
2240
2241 if (!poison_stat)
2242 amdgpu_gfx_poison_consumption_handler(adev, entry);
2243}
2244
2245static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
2246 struct amdgpu_iv_entry *entry)
2247{
2248 struct amdgpu_device *adev = obj->adev;
2249 enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
2250 u64 event_id;
2251 int ret;
2252
2253 ret = amdgpu_ras_mark_ras_event(adev, type);
2254 if (ret)
2255 return;
2256
2257 event_id = amdgpu_ras_acquire_event_id(adev, type);
2258 RAS_EVENT_LOG(adev, event_id, "Poison is created\n");
2259
2260 if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
2261 struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
2262
a734adfb 2263 atomic_inc(&con->page_retirement_req_cnt);
5f08275c 2264 atomic_inc(&con->poison_creation_count);
2265
2266 wake_up(&con->page_retirement_wq);
2267 }
2268}
2269
2270static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
2271 struct amdgpu_iv_entry *entry)
2272{
2273 struct ras_ih_data *data = &obj->ih_data;
5b1270be 2274 struct ras_err_data err_data;
2275 int ret;
2276
2277 if (!data->cb)
2278 return;
2279
2280 ret = amdgpu_ras_error_data_init(&err_data);
2281 if (ret)
2282 return;
2283
2284	/* Let the IP handle its data; maybe we need to get the output
2285	 * from the callback to update the error type/count, etc.
2286 */
09a3d820 2287 amdgpu_ras_set_fed(obj->adev, true);
2288 ret = data->cb(obj->adev, &err_data, entry);
2289	/* a ue will trigger an interrupt, and in that case
2290	 * we need to do a reset to recover the whole system.
2291	 * But leave it to the IP to do that recovery; here we
2292	 * just dispatch the error.
2293 */
2294 if (ret == AMDGPU_RAS_SUCCESS) {
2295 /* these counts could be left as 0 if
2296 * some blocks do not count error number
2297 */
2298 obj->err_data.ue_count += err_data.ue_count;
2299 obj->err_data.ce_count += err_data.ce_count;
46e2231c 2300 obj->err_data.de_count += err_data.de_count;
50a7d025 2301 }
2302
2303 amdgpu_ras_error_data_fini(&err_data);
2304}
2305
c030f2e4 2306static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
2307{
2308 struct ras_ih_data *data = &obj->ih_data;
2309 struct amdgpu_iv_entry entry;
c030f2e4 2310
2311 while (data->rptr != data->wptr) {
2312 rmb();
2313 memcpy(&entry, &data->ring[data->rptr],
2314 data->element_size);
2315
2316 wmb();
2317 data->rptr = (data->aligned_element_size +
2318 data->rptr) % data->ring_size;
2319
2320 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
2321 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2322 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
2323 else
2324 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
2325 } else {
2326 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2327 amdgpu_ras_interrupt_umc_handler(obj, &entry);
2328 else
2329 dev_warn(obj->adev->dev,
2330 "No RAS interrupt handler for non-UMC block with poison disabled.\n");
c030f2e4 2331 }
2332 }
2333}
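
/*
 * Editor's note, an assumption about intent: the rmb() above appears to
 * pair with the wmb() in amdgpu_ras_interrupt_dispatch() below, so the
 * consumer reads a fully written IV entry before advancing rptr, while
 * the wmb() here publishes the rptr update before wptr is re-checked.
 */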
2334
2335static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
2336{
2337 struct ras_ih_data *data =
2338 container_of(work, struct ras_ih_data, ih_work);
2339 struct ras_manager *obj =
2340 container_of(data, struct ras_manager, ih_data);
2341
2342 amdgpu_ras_interrupt_handler(obj);
2343}
2344
2345int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
2346 struct ras_dispatch_if *info)
2347{
2348 struct ras_manager *obj;
2349 struct ras_ih_data *data;
c030f2e4 2350
4c11d30c 2351 obj = amdgpu_ras_find_obj(adev, &info->head);
c030f2e4 2352 if (!obj)
2353 return -EINVAL;
2354
2355 data = &obj->ih_data;
2356
c030f2e4 2357 if (data->inuse == 0)
2358 return 0;
2359
2360	/* Might overflow: the ring is not checked for fullness here */
2361 memcpy(&data->ring[data->wptr], info->entry,
2362 data->element_size);
2363
2364 wmb();
2365 data->wptr = (data->aligned_element_size +
2366 data->wptr) % data->ring_size;
2367
2368 schedule_work(&data->ih_work);
2369
2370 return 0;
2371}
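
/*
 * Editor's sketch, not upstream code: how a top-half interrupt handler
 * could feed an IV entry into the ring above. The helper name and the
 * choice of the UMC block are assumptions for illustration.
 */
static int __maybe_unused example_dispatch_umc_iv(struct amdgpu_device *adev,
						  struct amdgpu_iv_entry *entry)
{
	struct ras_dispatch_if ih_data = {
		.head.block = AMDGPU_RAS_BLOCK__UMC,
		.entry = entry,
	};

	/* copies the entry into obj->ih_data.ring and schedules ih_work */
	return amdgpu_ras_interrupt_dispatch(adev, &ih_data);
}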
2372
2373int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
9252d33d 2374 struct ras_common_if *head)
c030f2e4 2375{
9252d33d 2376 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
c030f2e4 2377 struct ras_ih_data *data;
2378
2379 if (!obj)
2380 return -EINVAL;
2381
2382 data = &obj->ih_data;
2383 if (data->inuse == 0)
2384 return 0;
2385
2386 cancel_work_sync(&data->ih_work);
2387
2388 kfree(data->ring);
2389 memset(data, 0, sizeof(*data));
2390 put_obj(obj);
2391
2392 return 0;
2393}
2394
2395int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
9252d33d 2396 struct ras_common_if *head)
c030f2e4 2397{
9252d33d 2398 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
c030f2e4 2399 struct ras_ih_data *data;
9252d33d 2400 struct amdgpu_ras_block_object *ras_obj;
c030f2e4 2401
2402 if (!obj) {
2403		/* in case we register the IH before enabling the ras feature */
9252d33d 2404 obj = amdgpu_ras_create_obj(adev, head);
c030f2e4 2405 if (!obj)
2406 return -EINVAL;
2407 } else
2408 get_obj(obj);
2409
9252d33d 2410 ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2411
c030f2e4 2412 data = &obj->ih_data;
2413 /* add the callback.etc */
2414 *data = (struct ras_ih_data) {
2415 .inuse = 0,
9252d33d 2416 .cb = ras_obj->ras_cb,
c030f2e4 2417 .element_size = sizeof(struct amdgpu_iv_entry),
2418 .rptr = 0,
2419 .wptr = 0,
2420 };
2421
2422 INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2423
2424 data->aligned_element_size = ALIGN(data->element_size, 8);
2425 /* the ring can store 64 iv entries. */
2426 data->ring_size = 64 * data->aligned_element_size;
2427 data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2428 if (!data->ring) {
2429 put_obj(obj);
2430 return -ENOMEM;
2431 }
2432
2433 /* IH is ready */
2434 data->inuse = 1;
2435
2436 return 0;
2437}
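
/*
 * Editor's sizing note for the ring above: with element_size =
 * sizeof(struct amdgpu_iv_entry) and aligned_element_size =
 * ALIGN(element_size, 8), ring_size holds exactly 64 aligned entries;
 * rptr and wptr wrap modulo ring_size, so a 65th unconsumed entry would
 * overwrite the oldest one, which is the overflow the dispatch side's
 * comment warns about.
 */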
2438
2439static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2440{
2441 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2442 struct ras_manager *obj, *tmp;
2443
2444 list_for_each_entry_safe(obj, tmp, &con->head, node) {
9252d33d 2445 amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
c030f2e4 2446 }
2447
2448 return 0;
2449}
2450/* ih end */
2451
313c8fd3 2452/* traverse all IPs except NBIO to query error counters */
75ac6a25 2453static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type)
2454{
2455 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2456 struct ras_manager *obj;
2457
8ab0d6f0 2458 if (!adev->ras_enabled || !con)
2459 return;
2460
2461 list_for_each_entry(obj, &con->head, node) {
2462 struct ras_query_if info = {
2463 .head = obj->head,
2464 };
2465
2466 /*
2467 * PCIE_BIF IP has one different isr by ras controller
2468 * interrupt, the specific ras counter query will be
2469 * done in that isr. So skip such block from common
2470 * sync flood interrupt isr calling.
2471 */
2472 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2473 continue;
2474
2475 /*
2476		 * this is a workaround for aldebaran: skip sending the msg to
2477		 * smu to get the ecc_info table, because smu currently fails
2478		 * to handle that request.
2479		 * It should be removed once smu handles the ecc_info table.
2480 */
2481 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2482 (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2483 IP_VERSION(13, 0, 2)))
2484 continue;
2485
75ac6a25 2486 amdgpu_ras_query_error_status_with_event(adev, &info, type);
2a460963 2487
2488 if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2489 IP_VERSION(11, 0, 2) &&
2490 amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2491 IP_VERSION(11, 0, 4) &&
2492 amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2493 IP_VERSION(13, 0, 0)) {
2494 if (amdgpu_ras_reset_error_status(adev, info.head.block))
2495 dev_warn(adev->dev, "Failed to reset error counter and error status");
2496 }
2497 }
2498}
2499
3f975d0f 2500/* Parse RdRspStatus and WrRspStatus */
2501static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2502 struct ras_query_if *info)
3f975d0f 2503{
8eb53bb2 2504 struct amdgpu_ras_block_object *block_obj;
2505 /*
2506	 * Only two blocks need to query the read/write
2507	 * RspStatus in the current state
2508 */
5e67bba3 2509 if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2510 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
b6efdb02 2511 return;
2512
2513 block_obj = amdgpu_ras_get_ras_block(adev,
2514 info->head.block,
2515 info->head.sub_block_index);
5e67bba3 2516
5e67bba3 2517 if (!block_obj || !block_obj->hw_ops) {
2518 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
2519 get_ras_block_str(&info->head));
b6efdb02 2520 return;
3f975d0f 2521 }
5e67bba3 2522
2523 if (block_obj->hw_ops->query_ras_error_status)
ab3b9de6 2524 block_obj->hw_ops->query_ras_error_status(adev);
5e67bba3 2525
2526}
2527
2528static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2529{
2530 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2531 struct ras_manager *obj;
2532
8ab0d6f0 2533 if (!adev->ras_enabled || !con)
2534 return;
2535
2536 list_for_each_entry(obj, &con->head, node) {
2537 struct ras_query_if info = {
2538 .head = obj->head,
2539 };
2540
2541 amdgpu_ras_error_status_query(adev, &info);
2542 }
2543}
2544
c030f2e4 2545/* recovery begin */
466b1793 2546
2547/* return 0 on success.
2548 * the caller needs to free bps.
2549 */
2550static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2551 struct ras_badpage **bps, unsigned int *count)
2552{
2553 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2554 struct ras_err_handler_data *data;
2555 int i = 0;
732f2a30 2556 int ret = 0, status;
466b1793 2557
2558 if (!con || !con->eh_data || !bps || !count)
2559 return -EINVAL;
2560
2561 mutex_lock(&con->recovery_lock);
2562 data = con->eh_data;
2563 if (!data || data->count == 0) {
2564 *bps = NULL;
46cf2fec 2565 ret = -EINVAL;
466b1793 2566 goto out;
2567 }
2568
2569 *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
2570 if (!*bps) {
2571 ret = -ENOMEM;
2572 goto out;
2573 }
2574
2575 for (; i < data->count; i++) {
2576 (*bps)[i] = (struct ras_badpage){
9dc23a63 2577 .bp = data->bps[i].retired_page,
466b1793 2578 .size = AMDGPU_GPU_PAGE_SIZE,
52dd95f2 2579 .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
466b1793 2580 };
ec6aae97 2581 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
bcc09348 2582 data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
732f2a30 2583 if (status == -EBUSY)
52dd95f2 2584 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
732f2a30 2585 else if (status == -ENOENT)
52dd95f2 2586 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
466b1793 2587 }
2588
2589 *count = data->count;
2590out:
2591 mutex_unlock(&con->recovery_lock);
2592 return ret;
2593}
2594
2595static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
2596 struct amdgpu_hive_info *hive, bool status)
2597{
2598 struct amdgpu_device *tmp_adev;
2599
2600 if (hive) {
2601 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
2602 amdgpu_ras_set_fed(tmp_adev, status);
2603 } else {
2604 amdgpu_ras_set_fed(adev, status);
2605 }
2606}
2607
2608bool amdgpu_ras_in_recovery(struct amdgpu_device *adev)
2609{
2610 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2611 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2612 int hive_ras_recovery = 0;
2613
2614 if (hive) {
2615 hive_ras_recovery = atomic_read(&hive->ras_recovery);
2616 amdgpu_put_xgmi_hive(hive);
2617 }
2618
2619 if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
2620 return true;
2621
2622 return false;
2623}
2624
2625static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev)
2626{
2627 if (amdgpu_ras_intr_triggered())
2628 return RAS_EVENT_TYPE_FATAL;
2629 else
12b435a4 2630 return RAS_EVENT_TYPE_POISON_CONSUMPTION;
2631}
2632
c030f2e4 2633static void amdgpu_ras_do_recovery(struct work_struct *work)
2634{
2635 struct amdgpu_ras *ras =
2636 container_of(work, struct amdgpu_ras, recovery_work);
2637 struct amdgpu_device *remote_adev = NULL;
2638 struct amdgpu_device *adev = ras->adev;
2639 struct list_head device_list, *device_list_handle = NULL;
53dd920c 2640 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
75ac6a25 2641 enum ras_event_type type;
b3dbd6d3 2642
b41f742d 2643 if (hive) {
53dd920c 2644 atomic_set(&hive->ras_recovery, 1);
2645
2646 /* If any device which is part of the hive received RAS fatal
2647 * error interrupt, set fatal error status on all. This
2648 * condition will need a recovery, and flag will be cleared
2649 * as part of recovery.
2650 */
2651 list_for_each_entry(remote_adev, &hive->device_list,
2652 gmc.xgmi.head)
2653 if (amdgpu_ras_get_fed_status(remote_adev)) {
2654 amdgpu_ras_set_fed_all(adev, hive, true);
2655 break;
2656 }
2657 }
f75e94d8 2658 if (!ras->disable_ras_err_cnt_harvest) {
d95e8e97 2659
2660 /* Build list of devices to query RAS related errors */
2661 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2662 device_list_handle = &hive->device_list;
2663 } else {
2664 INIT_LIST_HEAD(&device_list);
2665 list_add_tail(&adev->gmc.xgmi.head, &device_list);
2666 device_list_handle = &device_list;
2667 }
c030f2e4 2668
75ac6a25 2669 type = amdgpu_ras_get_fatal_error_event(adev);
f75e94d8 2670 list_for_each_entry(remote_adev,
2671 device_list_handle, gmc.xgmi.head) {
2672 amdgpu_ras_query_err_status(remote_adev);
75ac6a25 2673 amdgpu_ras_log_on_err_counter(remote_adev, type);
3f975d0f 2674 }
d95e8e97 2675
b3dbd6d3 2676 }
313c8fd3 2677
2678 if (amdgpu_device_should_recover_gpu(ras->adev)) {
2679 struct amdgpu_reset_context reset_context;
2680 memset(&reset_context, 0, sizeof(reset_context));
2681
2682 reset_context.method = AMD_RESET_METHOD_NONE;
2683 reset_context.reset_req_dev = adev;
bac640dd 2684 reset_context.src = AMDGPU_RESET_SRC_RAS;
81db4eab 2685 set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
2686
2687 /* Perform full reset in fatal error mode */
2688 if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2689 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
6c47a79b 2690 else {
1a11a65d 2691 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
f1549c09 2692
2693 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2694 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2695 reset_context.method = AMD_RESET_METHOD_MODE2;
2696 }
2697
2698			/* If a fatal error occurs in poison mode, a mode1 reset
2699			 * is used to recover the gpu.
2700 */
2701 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2702 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2703 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2704
2705 psp_fatal_error_recovery_quirk(&adev->psp);
2c7cd280 2706 }
2707 }
2708
2709 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2710 }
c030f2e4 2711 atomic_set(&ras->in_recovery, 0);
2712 if (hive) {
2713 atomic_set(&hive->ras_recovery, 0);
2714 amdgpu_put_xgmi_hive(hive);
2715 }
c030f2e4 2716}
2717
c030f2e4 2718/* alloc/realloc bps array */
2719static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2720 struct ras_err_handler_data *data, int pages)
2721{
2722 unsigned int old_space = data->count + data->space_left;
2723 unsigned int new_space = old_space + pages;
2724 unsigned int align_space = ALIGN(new_space, 512);
2725 void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
9dc23a63 2726
676deb38 2727 if (!bps) {
c030f2e4 2728 return -ENOMEM;
9dc23a63 2729 }
c030f2e4 2730
2731 if (data->bps) {
9dc23a63 2732 memcpy(bps, data->bps,
c030f2e4 2733 data->count * sizeof(*data->bps));
2734 kfree(data->bps);
2735 }
2736
9dc23a63 2737 data->bps = bps;
c030f2e4 2738 data->space_left += align_space - old_space;
2739 return 0;
2740}
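
/*
 * Editor's worked example for the growth policy above: with count = 500,
 * space_left = 12 and pages = 100, old_space = 512, new_space = 612 and
 * align_space = ALIGN(612, 512) = 1024, so the array always grows in
 * 512-record steps and space_left becomes 12 + (1024 - 512) = 524.
 */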
2741
a8d133e6 2742static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
2743 struct eeprom_table_record *bps,
2744 struct ras_err_data *err_data)
2745{
2746 struct ta_ras_query_address_input addr_in;
2747 uint32_t socket = 0;
2748 int ret = 0;
2749
2750 if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
2751 socket = adev->smuio.funcs->get_socket_id(adev);
2752
2753 /* reinit err_data */
2754 err_data->err_addr_cnt = 0;
2755 err_data->err_addr_len = adev->umc.retire_unit;
2756
2757 memset(&addr_in, 0, sizeof(addr_in));
2758 addr_in.ma.err_addr = bps->address;
2759 addr_in.ma.socket_id = socket;
2760 addr_in.ma.ch_inst = bps->mem_channel;
2761 /* tell RAS TA the node instance is not used */
2762 addr_in.ma.node_inst = TA_RAS_INV_NODE;
2763
2764 if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
2765 ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
2766 &addr_in, NULL, false);
2767
2768 return ret;
2769}
2770
2771static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
2772 struct eeprom_table_record *bps,
2773 struct ras_err_data *err_data)
2774{
2775 struct ta_ras_query_address_input addr_in;
2776 uint32_t die_id, socket = 0;
2777
2778 if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
2779 socket = adev->smuio.funcs->get_socket_id(adev);
2780
2781	/* although the die id is obtained from the PA in nps1 mode, the id
2782	 * is suitable for any nps mode
2783 */
2784 if (adev->umc.ras && adev->umc.ras->get_die_id_from_pa)
2785 die_id = adev->umc.ras->get_die_id_from_pa(adev, bps->address,
2786 bps->retired_page << AMDGPU_GPU_PAGE_SHIFT);
2787 else
2788 return -EINVAL;
2789
2790 /* reinit err_data */
2791 err_data->err_addr_cnt = 0;
2792 err_data->err_addr_len = adev->umc.retire_unit;
2793
2794 memset(&addr_in, 0, sizeof(addr_in));
2795 addr_in.ma.err_addr = bps->address;
2796 addr_in.ma.ch_inst = bps->mem_channel;
2797 addr_in.ma.umc_inst = bps->mcumc_id;
2798 addr_in.ma.node_inst = die_id;
2799 addr_in.ma.socket_id = socket;
2800
2801 if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
2802 return adev->umc.ras->convert_ras_err_addr(adev, err_data,
2803 &addr_in, NULL, false);
2804 else
2805 return -EINVAL;
2806}
2807
0153d276 2808static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
2809 struct eeprom_table_record *bps, int count)
2810{
2811 int j;
2812 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2813 struct ras_err_handler_data *data = con->eh_data;
2814
2815 for (j = 0; j < count; j++) {
2816 if (amdgpu_ras_check_bad_page_unlock(con,
2817 bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2818 continue;
2819
2820 if (!data->space_left &&
2821 amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2822 return -ENOMEM;
2823 }
2824
2825 amdgpu_ras_reserve_page(adev, bps[j].retired_page);
2826
2827 memcpy(&data->bps[data->count], &(bps[j]),
2828 sizeof(struct eeprom_table_record));
2829 data->count++;
2830 data->space_left--;
2831 }
2832
2833 return 0;
2834}
2835
2836static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
2837 struct eeprom_table_record *bps, struct ras_err_data *err_data,
2838 enum amdgpu_memory_partition nps)
2839{
2840 int i = 0;
2841 enum amdgpu_memory_partition save_nps;
2842
2843 save_nps = (bps[0].retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
2844
a4b6e990 2845	/* old asics just have pa in eeprom */
2846 if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
2847 memcpy(err_data->err_addr, bps,
2848 sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
2849 goto out;
2850 }
2851
0153d276 2852 for (i = 0; i < adev->umc.retire_unit; i++)
2853 bps[i].retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
2854
2855 if (save_nps) {
2856 if (save_nps == nps) {
2857 if (amdgpu_umc_pages_in_a_row(adev, err_data,
2858 bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2859 return -EINVAL;
48ee3d8e 2860 for (i = 0; i < adev->umc.retire_unit; i++) {
2861 err_data->err_addr[i].address = bps[0].address;
2862 err_data->err_addr[i].mem_channel = bps[0].mem_channel;
2863 err_data->err_addr[i].bank = bps[0].bank;
2864 err_data->err_addr[i].err_type = bps[0].err_type;
2865 err_data->err_addr[i].mcumc_id = bps[0].mcumc_id;
2866 }
0153d276 2867 } else {
2868 if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
2869 return -EINVAL;
2870 }
2871 } else {
fce0afca 2872 if (bps[0].address == 0) {
2873 /* for specific old eeprom data, mca address is not stored,
2874 * calc it from pa
2875 */
2876 if (amdgpu_umc_pa2mca(adev, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT,
2877 &(bps[0].address), AMDGPU_NPS1_PARTITION_MODE))
2878 return -EINVAL;
2879 }
2880
0153d276 2881 if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) {
2882 if (nps == AMDGPU_NPS1_PARTITION_MODE)
2883 memcpy(err_data->err_addr, bps,
2884 sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
2885 else
2886 return -EOPNOTSUPP;
2887 }
2888 }
2889
a4b6e990 2890out:
0153d276 2891 return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr, adev->umc.retire_unit);
2892}
2893
2894static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
2895 struct eeprom_table_record *bps, struct ras_err_data *err_data,
2896 enum amdgpu_memory_partition nps)
2897{
48ee3d8e 2898 int i = 0;
0153d276 2899 enum amdgpu_memory_partition save_nps;
2900
2901 save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
2902 bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
2903
2904 if (save_nps == nps) {
2905 if (amdgpu_umc_pages_in_a_row(adev, err_data,
2906 bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
2907 return -EINVAL;
48ee3d8e 2908 for (i = 0; i < adev->umc.retire_unit; i++) {
2909 err_data->err_addr[i].address = bps->address;
2910 err_data->err_addr[i].mem_channel = bps->mem_channel;
2911 err_data->err_addr[i].bank = bps->bank;
2912 err_data->err_addr[i].err_type = bps->err_type;
2913 err_data->err_addr[i].mcumc_id = bps->mcumc_id;
2914 }
0153d276 2915 } else {
31e837d2 2916 if (bps->address) {
2917 if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
2918 return -EINVAL;
2919 } else {
2920 /* for specific old eeprom data, mca address is not stored,
2921 * calc it from pa
2922 */
2923 if (amdgpu_umc_pa2mca(adev, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT,
2924 &(bps->address), AMDGPU_NPS1_PARTITION_MODE))
2925 return -EINVAL;
2926
2927 if (amdgpu_ras_mca2pa(adev, bps, err_data))
2928 return -EOPNOTSUPP;
2929 }
0153d276 2930 }
f5db5906 2931
0153d276 2932 return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
2933 adev->umc.retire_unit);
2934}
2935
c030f2e4 2936/* it deals with vram only. */
2937int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
a8d133e6 2938 struct eeprom_table_record *bps, int pages, bool from_rom)
c030f2e4 2939{
2940 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
0eecff79 2941 struct ras_err_data err_data;
d08fb663
TZ
2942 struct amdgpu_ras_eeprom_control *control =
2943 &adev->psp.ras_context.ras->eeprom_control;
07dd49e1 2944 enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
c030f2e4 2945 int ret = 0;
f5db5906 2946 uint32_t i = 0;
c030f2e4 2947
73aa8e1a 2948 if (!con || !con->eh_data || !bps || pages <= 0)
c030f2e4 2949 return 0;
2950
a8d133e6 2951 if (from_rom) {
0eecff79
TZ
2952 err_data.err_addr =
2953 kcalloc(adev->umc.retire_unit,
2954 sizeof(struct eeprom_table_record), GFP_KERNEL);
2955 if (!err_data.err_addr) {
2956 dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
0153d276 2957 return -ENOMEM;
c030f2e4 2958 }
2959
07dd49e1
TZ
2960 if (adev->gmc.gmc_funcs->query_mem_partition_mode)
2961 nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
0eecff79
TZ
2962 }
2963
a8d133e6 2964 mutex_lock(&con->recovery_lock);
0153d276 2965
2966 if (from_rom) {
f5db5906 2967		/* there are no pa recs in V3, so skip pa recs processing */
2968 if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
2969 for (i = 0; i < pages; i++) {
2970 if (control->ras_num_recs - i >= adev->umc.retire_unit) {
2971 if ((bps[i].address == bps[i + 1].address) &&
2972 (bps[i].mem_channel == bps[i + 1].mem_channel)) {
2973 /* deal with retire_unit records a time */
2974 ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
2975 &bps[i], &err_data, nps);
2976 if (ret)
2977 control->ras_num_bad_pages -= adev->umc.retire_unit;
2978 i += (adev->umc.retire_unit - 1);
2979 } else {
2980 break;
2981 }
a8d133e6 2982 } else {
0153d276 2983 break;
a8d133e6 2984 }
a8d133e6 2985 }
0eecff79 2986 }
0153d276 2987 for (; i < pages; i++) {
2988 ret = __amdgpu_ras_convert_rec_from_rom(adev,
2989 &bps[i], &err_data, nps);
2990 if (ret)
f5db5906 2991 control->ras_num_bad_pages -= adev->umc.retire_unit;
0eecff79 2992 }
0153d276 2993 } else {
2994 ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
676deb38 2995 }
0eecff79 2996
a8d133e6 2997 if (from_rom)
0eecff79 2998 kfree(err_data.err_addr);
c030f2e4 2999 mutex_unlock(&con->recovery_lock);
3000
3001 return ret;
3002}
3003
3004/*
3005 * write error record array to eeprom, the function should be
3006 * protected by recovery_lock
4d33e0f1 3007 * new_cnt: newly added UE count, excluding reserved bad pages; can be NULL
78ad00c9 3008 */
3009int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
3010 unsigned long *new_cnt)
3011{
3012 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3013 struct ras_err_handler_data *data;
8a3e801f 3014 struct amdgpu_ras_eeprom_control *control;
c3d4acf0 3015 int save_count, unit_num, bad_page_num, i;
78ad00c9 3016
3017 if (!con || !con->eh_data) {
3018 if (new_cnt)
3019 *new_cnt = 0;
3020
78ad00c9 3021 return 0;
4d33e0f1 3022 }
78ad00c9 3023
cfce8f4f 3024 if (!con->eeprom_control.is_eeprom_valid) {
3025 dev_warn(adev->dev,
3026 "Failed to save EEPROM table data because of EEPROM data corruption!");
3027 if (new_cnt)
3028 *new_cnt = 0;
3029
3030 return 0;
3031 }
3032
d9a69fe5 3033 mutex_lock(&con->recovery_lock);
8a3e801f 3034 control = &con->eeprom_control;
78ad00c9 3035 data = con->eh_data;
ae756cd8 3036 bad_page_num = control->ras_num_bad_pages;
c3d4acf0 3037 save_count = data->count - bad_page_num;
d9a69fe5 3038 mutex_unlock(&con->recovery_lock);
4d33e0f1 3039
c3d4acf0 3040 unit_num = save_count / adev->umc.retire_unit;
4d33e0f1 3041 if (new_cnt)
c3d4acf0 3042 *new_cnt = unit_num;
4d33e0f1 3043
78ad00c9 3044 /* only new entries are saved */
b1628425 3045 if (save_count > 0) {
a4b6e990 3046		/* old asics only save pa to eeprom like before */
3047 if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
c3d4acf0 3048 if (amdgpu_ras_eeprom_append(control,
a4b6e990 3049 &data->bps[bad_page_num], save_count)) {
3050 dev_err(adev->dev, "Failed to save EEPROM table data!");
3051 return -EIO;
3052 }
a4b6e990 3053 } else {
3054 for (i = 0; i < unit_num; i++) {
3055 if (amdgpu_ras_eeprom_append(control,
3056 &data->bps[bad_page_num +
3057 i * adev->umc.retire_unit], 1)) {
3058 dev_err(adev->dev, "Failed to save EEPROM table data!");
3059 return -EIO;
3060 }
3061 }
78ad00c9 3062 }
a4b6e990 3063
3064 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
3065 }
3066
3067 return 0;
3068}
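
/*
 * Editor's worked example for the bookkeeping above: with data->count = 96,
 * ras_num_bad_pages = 32 and retire_unit = 16, save_count = 64 and
 * unit_num = 4; on ASICs with UMC IP version >= 12 the loop then issues
 * four amdgpu_ras_eeprom_append() calls, one record per retire unit,
 * instead of appending all 64 page records at once.
 */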
3069
3070/*
3071 * read error record array in eeprom and reserve enough space for
3072 * storing new bad pages
3073 */
3074static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
3075{
3076 struct amdgpu_ras_eeprom_control *control =
6457205c 3077 &adev->psp.ras_context.ras->eeprom_control;
e4e6a589 3078 struct eeprom_table_record *bps;
a8f921a1 3079 int ret, i = 0;
3080
3081 /* no bad page record, skip eeprom access */
0686627b 3082 if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
e4e6a589 3083 return 0;
78ad00c9 3084
0686627b 3085 bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
3086 if (!bps)
3087 return -ENOMEM;
3088
0686627b 3089 ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
772df3df 3090 if (ret) {
6952e99c 3091 dev_err(adev->dev, "Failed to load EEPROM table records!");
772df3df 3092 } else {
a8f921a1 3093 if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
f5db5906 3094			/* In V3 there are no pa recs, and in some cases (when address == 0)
3095			 * records may be parsed as pa recs, so add a version check to avoid that.
3096			 */
3097 if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
3098 for (i = 0; i < control->ras_num_recs; i++) {
3099 if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
3100 if ((bps[i].address == bps[i + 1].address) &&
3101 (bps[i].mem_channel == bps[i + 1].mem_channel)) {
3102 control->ras_num_pa_recs += adev->umc.retire_unit;
3103 i += (adev->umc.retire_unit - 1);
3104 } else {
3105 control->ras_num_mca_recs +=
3106 (control->ras_num_recs - i);
3107 break;
3108 }
a8f921a1 3109 } else {
f5db5906 3110 control->ras_num_mca_recs += (control->ras_num_recs - i);
a8f921a1 3111 break;
3112 }
a8f921a1 3113 }
f5db5906 3114 } else {
3115 control->ras_num_mca_recs = control->ras_num_recs;
a8f921a1 3116 }
3117 }
3118
3119 ret = amdgpu_ras_eeprom_check(control);
3120 if (ret)
3121 goto out;
3122
3123 /* HW not usable */
3124 if (amdgpu_ras_is_rma(adev)) {
3125 ret = -EHWPOISON;
3126 goto out;
3127 }
3128
a8d133e6 3129 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
772df3df 3130 }
78ad00c9 3131
1f06e7f3 3132out:
3133 kfree(bps);
3134 return ret;
3135}
3136
3137static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
3138 uint64_t addr)
3139{
3140 struct ras_err_handler_data *data = con->eh_data;
3141 int i;
3142
3143 addr >>= AMDGPU_GPU_PAGE_SHIFT;
3144 for (i = 0; i < data->count; i++)
3145 if (addr == data->bps[i].retired_page)
3146 return true;
3147
3148 return false;
3149}
3150
3151/*
3152 * check if an address belongs to bad page
3153 *
3154 * Note: this check is only for umc block
3155 */
3156static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
3157 uint64_t addr)
3158{
3159 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3160 bool ret = false;
3161
3162 if (!con || !con->eh_data)
3163 return ret;
3164
3165 mutex_lock(&con->recovery_lock);
676deb38 3166 ret = amdgpu_ras_check_bad_page_unlock(con, addr);
3167 mutex_unlock(&con->recovery_lock);
3168 return ret;
3169}
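
/*
 * Editor's sketch, not upstream code: a hypothetical caller using
 * amdgpu_ras_check_bad_page() above to skip a VRAM page that has already
 * been retired; addr is a byte address within VRAM.
 */
static bool __maybe_unused example_page_is_usable(struct amdgpu_device *adev,
						  uint64_t addr)
{
	/* the check takes con->recovery_lock internally */
	return !amdgpu_ras_check_bad_page(adev, addr);
}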
3170
e5c04edf 3171static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
e4e6a589 3172 uint32_t max_count)
c84d4670 3173{
e5c04edf 3174 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3175
3176 /*
3177 * amdgpu_bad_page_threshold is used to config
3178 * the threshold for the number of bad pages.
3179 * -1: Threshold is set to default value
3180 * Driver will issue a warning message when threshold is reached
3181 * and continue runtime services.
3182 * 0: Disable bad page retirement
3183 * Driver will not retire bad pages
3184 * which is intended for debugging purpose.
3185 * -2: Threshold is determined by a formula
3186 * that assumes 1 bad page per 100M of local memory.
3187 * Driver will continue runtime services when threhold is reached.
3188 * 0 < threshold < max number of bad page records in EEPROM,
3189 * A user-defined threshold is set
3190 * Driver will halt runtime services when this custom threshold is reached.
c84d4670 3191 */
16b85a09 3192 if (amdgpu_bad_page_threshold == -2) {
e4e6a589 3193 u64 val = adev->gmc.mc_vram_size;
c84d4670 3194
e4e6a589 3195 do_div(val, RAS_BAD_PAGE_COVER);
e5c04edf 3196 con->bad_page_cnt_threshold = min(lower_32_bits(val),
e4e6a589 3197 max_count);
3198 } else if (amdgpu_bad_page_threshold == -1) {
3199 con->bad_page_cnt_threshold = ((con->reserved_pages_in_bytes) >> 21) << 4;
e5c04edf 3200 } else {
3201 con->bad_page_cnt_threshold = min_t(int, max_count,
3202 amdgpu_bad_page_threshold);
3203 }
3204}
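
/*
 * Editor's worked example for the "-2" policy above, assuming
 * RAS_BAD_PAGE_COVER is 100M: a device with 16 GiB of VRAM computes
 * val = 17179869184 / 104857600 = 163, so the threshold becomes
 * min(163, max_count), i.e. roughly one bad page per 100M of VRAM.
 */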
3205
3206int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
3207 enum amdgpu_ras_block block, uint16_t pasid,
3208 pasid_notify pasid_fn, void *data, uint32_t reset)
3209{
3210 int ret = 0;
3211 struct ras_poison_msg poison_msg;
3212 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3213
3214 memset(&poison_msg, 0, sizeof(poison_msg));
3215 poison_msg.block = block;
3216 poison_msg.pasid = pasid;
3217 poison_msg.reset = reset;
3218 poison_msg.pasid_fn = pasid_fn;
3219 poison_msg.data = data;
3220
3221 ret = kfifo_put(&con->poison_fifo, poison_msg);
3222 if (!ret) {
3223 dev_err(adev->dev, "Poison message fifo is full!\n");
3224 return -ENOSPC;
3225 }
3226
3227 return 0;
3228}
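
/*
 * Editor's sketch, not upstream code: queueing a poison-consumption
 * message for the page retirement thread via the fifo above. The block
 * and reset policy chosen here are assumptions for illustration.
 */
static void __maybe_unused example_report_sdma_poison(struct amdgpu_device *adev)
{
	/* no pasid callback; request a mode-2 reset once the message is consumed */
	amdgpu_ras_put_poison_req(adev, AMDGPU_RAS_BLOCK__SDMA, 0,
				  NULL, NULL, AMDGPU_RAS_GPU_RESET_MODE2_RESET);
}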
3229
3230static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
3231 struct ras_poison_msg *poison_msg)
3232{
3233 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3234
3235 return kfifo_get(&con->poison_fifo, poison_msg);
3236}
98b5bc87 3237
3238static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
3239{
3240 mutex_init(&ecc_log->lock);
3241
f493dd64 3242 INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
3243 ecc_log->de_queried_count = 0;
3244 ecc_log->prev_de_queried_count = 0;
3245}
3246
3247static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
3248{
3249 struct radix_tree_iter iter;
3250 void __rcu **slot;
3251 struct ras_ecc_err *ecc_err;
3252
3253 mutex_lock(&ecc_log->lock);
3254 radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
3255 ecc_err = radix_tree_deref_slot(slot);
3256 kfree(ecc_err->err_pages.pfn);
3257 kfree(ecc_err);
3258 radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
3259 }
3260 mutex_unlock(&ecc_log->lock);
3261
3262 mutex_destroy(&ecc_log->lock);
3263 ecc_log->de_queried_count = 0;
3264 ecc_log->prev_de_queried_count = 0;
f493dd64 3265}
a734adfb 3266
3267static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
3268 uint32_t delayed_ms)
3269{
3270 int ret;
3271
3272 mutex_lock(&con->umc_ecc_log.lock);
3273 ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
3274 UMC_ECC_NEW_DETECTED_TAG);
3275 mutex_unlock(&con->umc_ecc_log.lock);
3276
3277 if (ret)
3278 schedule_delayed_work(&con->page_retirement_dwork,
3279 msecs_to_jiffies(delayed_ms));
3280
3281 return ret ? true : false;
3282}
3283
3284static void amdgpu_ras_do_page_retirement(struct work_struct *work)
3285{
3286 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3287 page_retirement_dwork.work);
3288 struct amdgpu_device *adev = con->adev;
3289 struct ras_err_data err_data;
5f7697bb 3290 unsigned long err_cnt;
2cf8e50e 3291
3292 /* If gpu reset is ongoing, delay retiring the bad pages */
3293 if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
3294 amdgpu_ras_schedule_retirement_dwork(con,
3295 AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3);
2cf8e50e 3296 return;
e23300df 3297 }
3298
3299 amdgpu_ras_error_data_init(&err_data);
3300
3301 amdgpu_umc_handle_bad_pages(adev, &err_data);
5f7697bb 3302 err_cnt = err_data.err_addr_cnt;
3303
3304 amdgpu_ras_error_data_fini(&err_data);
3305
792be2e2 3306 if (err_cnt && amdgpu_ras_is_rma(adev))
5f7697bb
TZ
3307 amdgpu_ras_reset_gpu(adev);
3308
3309 amdgpu_ras_schedule_retirement_dwork(con,
3310 AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
3311}
3312
3313static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
3314 uint32_t poison_creation_count)
3315{
3316 int ret = 0;
3317 struct ras_ecc_log_info *ecc_log;
3318 struct ras_query_if info;
78146c1d 3319 uint32_t timeout = 0;
a734adfb 3320 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3321 uint64_t de_queried_count;
3322 uint32_t new_detect_count, total_detect_count;
3323 uint32_t need_query_count = poison_creation_count;
5b9de259 3324 enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
3325
3326 memset(&info, 0, sizeof(info));
3ca73073 3327 info.head.block = AMDGPU_RAS_BLOCK__UMC;
3328
3329 ecc_log = &ras->umc_ecc_log;
78146c1d 3330 total_detect_count = 0;
a734adfb 3331 do {
5b9de259 3332 ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
3333 if (ret)
3334 return ret;
3335
3336 de_queried_count = ecc_log->de_queried_count;
3337 if (de_queried_count > ecc_log->prev_de_queried_count) {
3338 new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
3339 ecc_log->prev_de_queried_count = de_queried_count;
3340 timeout = 0;
3341 } else {
3342 new_detect_count = 0;
3343 }
3344
3345 if (new_detect_count) {
3346 total_detect_count += new_detect_count;
3347 } else {
3348 if (!timeout && need_query_count)
3349 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
3350
3351 if (timeout) {
a6d6a86e 3352 if (!--timeout)
78146c1d 3353 break;
3354 msleep(1);
3355 }
a734adfb 3356 }
78146c1d 3357 } while (total_detect_count < need_query_count);
a734adfb 3358
78146c1d 3359 if (total_detect_count)
3ca73073 3360 schedule_delayed_work(&ras->page_retirement_dwork, 0);
3361
3362 return 0;
3363}
3364
3365static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev)
3366{
3367 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3368 struct ras_poison_msg msg;
3369 int ret;
3370
3371 do {
3372 ret = kfifo_get(&con->poison_fifo, &msg);
3373 } while (ret);
3374}
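/*
 * Editor's note: kfifo_get() copies one element out and returns 1, or
 * returns 0 once the fifo is empty, so the loop above simply discards
 * entries until none remain.  A self-contained sketch of the same drain
 * pattern (hypothetical fifo of ints, not a driver symbol):
 */
#if 0
#include <linux/kfifo.h>

static DEFINE_KFIFO(example_fifo, int, 16);

static void example_drain(void)
{
	int v;

	while (kfifo_get(&example_fifo, &v))
		;	/* drop each cached element */
}
#endif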
3375
370fbff4 3376static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
e278849c 3377 uint32_t msg_count, uint32_t *gpu_reset)
370fbff4
YC
3378{
3379 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
e278849c
YC
3380 uint32_t reset_flags = 0, reset = 0;
3381 struct ras_poison_msg msg;
3382 int ret, i;
370fbff4
YC
3383
3384 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
3385
e278849c
YC
3386 for (i = 0; i < msg_count; i++) {
3387 ret = amdgpu_ras_get_poison_req(adev, &msg);
3388 if (!ret)
3389 continue;
3390
3391 if (msg.pasid_fn)
3392 msg.pasid_fn(adev, msg.pasid, msg.data);
3393
3394 reset_flags |= msg.reset;
3395 }
370fbff4 3396
5f7697bb 3397 /* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
792be2e2 3398 if (reset_flags && !amdgpu_ras_is_rma(adev)) {
e278849c
YC
3399 if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
3400 reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3401 else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
3402 reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
3403 else
3404 reset = reset_flags;
3405
370fbff4
YC
3406 flush_delayed_work(&con->page_retirement_dwork);
3407
3408 con->gpu_reset_flags |= reset;
3409 amdgpu_ras_reset_gpu(adev);
e278849c
YC
3410
3411 *gpu_reset = reset;
f852c979
YC
3412
3413 /* Wait for gpu recovery to complete */
3414 flush_work(&con->recovery_work);
370fbff4
YC
3415 }
3416
3417 return 0;
3418}
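/*
 * Editor's note on the handler above: when the cached consumption messages
 * requested different reset kinds, the strongest one wins -- MODE1 (full
 * ASIC reset) is preferred over MODE2, and any other non-zero flag set is
 * passed through unchanged.  The retirement work is flushed first so that
 * bad pages reach the EEPROM before the reset tears down driver state.
 */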
3419
3fdcd0a3
YC
3420static int amdgpu_ras_page_retirement_thread(void *param)
3421{
3422 struct amdgpu_device *adev = (struct amdgpu_device *)param;
3423 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
e278849c
YC
3424 uint32_t poison_creation_count, msg_count;
3425 uint32_t gpu_reset;
5f08275c 3426 int ret;
3fdcd0a3
YC
3427
3428 while (!kthread_should_stop()) {
3429
3430 wait_event_interruptible(con->page_retirement_wq,
c84a7e21 3431 kthread_should_stop() ||
3fdcd0a3
YC
3432 atomic_read(&con->page_retirement_req_cnt));
3433
c84a7e21
MJ
3434 if (kthread_should_stop())
3435 break;
3436
e278849c 3437 gpu_reset = 0;
5f08275c
YC
3438
3439 do {
3440 poison_creation_count = atomic_read(&con->poison_creation_count);
3441 ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count);
3442 if (ret == -EIO)
3443 break;
3444
3445 if (poison_creation_count) {
3446 atomic_sub(poison_creation_count, &con->poison_creation_count);
3447 atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
3448 }
3449 } while (atomic_read(&con->poison_creation_count));
6c23f3d1 3450
e278849c
YC
3451 if (ret != -EIO) {
3452 msg_count = kfifo_len(&con->poison_fifo);
3453 if (msg_count) {
3454 ret = amdgpu_ras_poison_consumption_handler(adev,
3455 msg_count, &gpu_reset);
3456 if ((ret != -EIO) &&
3457 (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET))
3458 atomic_sub(msg_count, &con->page_retirement_req_cnt);
3459 }
3460 }
f852c979
YC
3461
3462 if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) {
 3463 /* a gpu mode-1 reset is ongoing, or a ras mode-1 reset has just completed */
3464 /* Clear poison creation request */
3465 atomic_set(&con->poison_creation_count, 0);
3466
3467 /* Clear poison fifo */
3468 amdgpu_ras_clear_poison_fifo(adev);
3469
3470 /* Clear all poison requests */
3471 atomic_set(&con->page_retirement_req_cnt, 0);
3472
3473 if (ret == -EIO) {
3474 /* Wait for mode-1 reset to complete */
3475 down_read(&adev->reset_domain->sem);
3476 up_read(&adev->reset_domain->sem);
3477 }
3478
3479 /* Wake up work to save bad pages to eeprom */
3480 schedule_delayed_work(&con->page_retirement_dwork, 0);
3481 } else if (gpu_reset) {
3482 /* gpu just completed mode-2 reset or other reset */
3483 /* Clear poison consumption messages cached in fifo */
3484 msg_count = kfifo_len(&con->poison_fifo);
3485 if (msg_count) {
3486 amdgpu_ras_clear_poison_fifo(adev);
3487 atomic_sub(msg_count, &con->page_retirement_req_cnt);
3488 }
3489
3490 /* Wake up work to save bad pages to eeprom */
3491 schedule_delayed_work(&con->page_retirement_dwork, 0);
3492 }
3fdcd0a3
YC
3493 }
3494
3495 return 0;
3496}
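/*
 * Editor's sketch of the kthread pattern used above (hypothetical names,
 * not driver code): the thread sleeps until work is counted or a stop is
 * requested, and kthread_should_stop() is re-checked after waking so a
 * concurrent kthread_stop() cannot be missed.
 */
#if 0
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/wait.h>

struct example_ctx {
	wait_queue_head_t wq;
	atomic_t req_cnt;
};

static int example_thread(void *param)
{
	struct example_ctx *ctx = param;

	while (!kthread_should_stop()) {
		wait_event_interruptible(ctx->wq,
			kthread_should_stop() ||
			atomic_read(&ctx->req_cnt));

		if (kthread_should_stop())
			break;

		/* ... consume ctx->req_cnt requests here ... */
	}

	return 0;
}
#endif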
3497
b17f8732
LL
3498int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
3499{
3500 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
772df3df 3501 struct amdgpu_ras_eeprom_control *control;
b17f8732
LL
3502 int ret;
3503
3504 if (!con || amdgpu_sriov_vf(adev))
3505 return 0;
3506
772df3df
TZ
3507 control = &con->eeprom_control;
3508 ret = amdgpu_ras_eeprom_init(control);
cfce8f4f 3509 control->is_eeprom_valid = !ret;
b17f8732 3510
772df3df 3511 if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
a8f921a1 3512 control->ras_num_pa_recs = control->ras_num_recs;
772df3df 3513
699bff37
TZ
3514 if (adev->umc.ras &&
3515 adev->umc.ras->get_retire_flip_bits)
3516 adev->umc.ras->get_retire_flip_bits(adev);
3517
cfce8f4f 3518 if (control->ras_num_recs && control->is_eeprom_valid) {
b17f8732 3519 ret = amdgpu_ras_load_bad_pages(adev);
cfce8f4f 3520 if (ret) {
3521 control->is_eeprom_valid = false;
3522 return 0;
3523 }
b17f8732
LL
3524
3525 amdgpu_dpm_send_hbm_bad_pages_num(
ae756cd8 3526 adev, control->ras_num_bad_pages);
b17f8732
LL
3527
3528 if (con->update_channel_flag == true) {
3529 amdgpu_dpm_send_hbm_bad_channel_flag(
772df3df 3530 adev, control->bad_channel_bitmap);
b17f8732
LL
3531 con->update_channel_flag = false;
3532 }
05d50ea3
TZ
3533
3534 /* The format action is only applied to new ASICs */
3535 if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) >= 12 &&
3536 control->tbl_hdr.version < RAS_TABLE_VER_V3)
3537 if (!amdgpu_ras_eeprom_reset_table(control))
3538 if (amdgpu_ras_save_bad_pages(adev, NULL))
3539 dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n");
b17f8732
LL
3540 }
3541
cfce8f4f 3542 return 0;
b17f8732
LL
3543}
3544
3545int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
c030f2e4 3546{
3547 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4d1337d2 3548 struct ras_err_handler_data **data;
e4e6a589 3549 u32 max_eeprom_records_count = 0;
78ad00c9 3550 int ret;
c030f2e4 3551
e0e146d5 3552 if (!con || amdgpu_sriov_vf(adev))
1d9d2ca8
LT
3553 return 0;
3554
3555 /* Allow access to RAS EEPROM via debugfs, when the ASIC
3556 * supports RAS and debugfs is enabled, but when
3557 * adev->ras_enabled is unset, i.e. when "ras_enable"
3558 * module parameter is set to 0.
3559 */
3560 con->adev = adev;
3561
3562 if (!adev->ras_enabled)
4d1337d2
AG
3563 return 0;
3564
1d9d2ca8 3565 data = &con->eh_data;
091411be 3566 *data = kzalloc(sizeof(**data), GFP_KERNEL);
1a6fc071
TZ
3567 if (!*data) {
3568 ret = -ENOMEM;
3569 goto out;
3570 }
c030f2e4 3571
3572 mutex_init(&con->recovery_lock);
3573 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
3574 atomic_set(&con->in_recovery, 0);
69691c82 3575 con->eeprom_control.bad_channel_bitmap = 0;
c030f2e4 3576
7f599fed 3577 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
e4e6a589 3578 amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
c84d4670 3579
b17f8732
LL
3580 if (init_bp_info) {
3581 ret = amdgpu_ras_init_badpage_info(adev);
78ad00c9 3582 if (ret)
1a6fc071 3583 goto free;
78ad00c9 3584 }
c030f2e4 3585
af730e08 3586 mutex_init(&con->page_rsv_lock);
98b5bc87 3587 INIT_KFIFO(con->poison_fifo);
3fdcd0a3
YC
3588 mutex_init(&con->page_retirement_lock);
3589 init_waitqueue_head(&con->page_retirement_wq);
3590 atomic_set(&con->page_retirement_req_cnt, 0);
5f08275c 3591 atomic_set(&con->poison_creation_count, 0);
3fdcd0a3
YC
3592 con->page_retirement_thread =
3593 kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
3594 if (IS_ERR(con->page_retirement_thread)) {
3595 con->page_retirement_thread = NULL;
 3596 dev_warn(adev->dev, "Failed to create umc_page_retirement thread!\n");
3597 }
3598
2cf8e50e 3599 INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
f493dd64 3600 amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
12b2cab7
MJ
3601#ifdef CONFIG_X86_MCE_AMD
3602 if ((adev->asic_type == CHIP_ALDEBARAN) &&
3603 (adev->gmc.xgmi.connected_to_cpu))
91a1a52d 3604 amdgpu_register_bad_pages_mca_notifier(adev);
12b2cab7 3605#endif
c030f2e4 3606 return 0;
1a6fc071 3607
1a6fc071 3608free:
1a6fc071 3609 kfree((*data)->bps);
1a6fc071 3610 kfree(*data);
1995b3a3 3611 con->eh_data = NULL;
1a6fc071 3612out:
cf696091 3613 dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
1a6fc071 3614
b82e65a9
GC
3615 /*
 3616 * Except for the error-threshold-exceeded case, other failure cases
 3617 * in this function do not fail amdgpu driver init.
3618 */
792be2e2 3619 if (!amdgpu_ras_is_rma(adev))
b82e65a9
GC
3620 ret = 0;
3621 else
3622 ret = -EINVAL;
3623
1a6fc071 3624 return ret;
c030f2e4 3625}
3626
3627static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
3628{
3629 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3630 struct ras_err_handler_data *data = con->eh_data;
c0470691
YC
3631 int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES;
3632 bool ret;
c030f2e4 3633
1a6fc071
TZ
3634 /* recovery_init failed to init it, fini is useless */
3635 if (!data)
3636 return 0;
3637
c0470691
YC
3638 /* Save all cached bad pages to eeprom */
3639 do {
3640 flush_delayed_work(&con->page_retirement_dwork);
3641 ret = amdgpu_ras_schedule_retirement_dwork(con, 0);
3642 } while (ret && max_flush_timeout--);
3643
3fdcd0a3
YC
3644 if (con->page_retirement_thread)
3645 kthread_stop(con->page_retirement_thread);
3646
3647 atomic_set(&con->page_retirement_req_cnt, 0);
5f08275c 3648 atomic_set(&con->poison_creation_count, 0);
3fdcd0a3 3649
af730e08
YC
3650 mutex_destroy(&con->page_rsv_lock);
3651
c030f2e4 3652 cancel_work_sync(&con->recovery_work);
c030f2e4 3653
2cf8e50e
YC
3654 cancel_delayed_work_sync(&con->page_retirement_dwork);
3655
f493dd64
YC
3656 amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
3657
c030f2e4 3658 mutex_lock(&con->recovery_lock);
3659 con->eh_data = NULL;
3660 kfree(data->bps);
3661 kfree(data);
3662 mutex_unlock(&con->recovery_lock);
3663
3664 return 0;
3665}
3666/* recovery end */
3667
084e2640 3668static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
5436ab94 3669{
82835055 3670 if (amdgpu_sriov_vf(adev)) {
4e8303cf 3671 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
82835055 3672 case IP_VERSION(13, 0, 2):
80578f16 3673 case IP_VERSION(13, 0, 6):
9a826c4a 3674 case IP_VERSION(13, 0, 12):
1dbd59f3 3675 case IP_VERSION(13, 0, 14):
82835055
YC
3676 return true;
3677 default:
3678 return false;
3679 }
3680 }
3681
073285ef 3682 if (adev->asic_type == CHIP_IP_DISCOVERY) {
4e8303cf 3683 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
073285ef 3684 case IP_VERSION(13, 0, 0):
cb906ce3 3685 case IP_VERSION(13, 0, 6):
073285ef 3686 case IP_VERSION(13, 0, 10):
9a826c4a 3687 case IP_VERSION(13, 0, 12):
1dbd59f3 3688 case IP_VERSION(13, 0, 14):
d1ebe307 3689 case IP_VERSION(14, 0, 3):
073285ef
YC
3690 return true;
3691 default:
3692 return false;
3693 }
3694 }
3695
084e2640
LT
3696 return adev->asic_type == CHIP_VEGA10 ||
3697 adev->asic_type == CHIP_VEGA20 ||
3698 adev->asic_type == CHIP_ARCTURUS ||
75f06251 3699 adev->asic_type == CHIP_ALDEBARAN ||
084e2640 3700 adev->asic_type == CHIP_SIENNA_CICHLID;
5436ab94
SY
3701}
3702
f50160cf
SY
3703/*
 3704 * this is a workaround for the vega20 workstation sku:
 3705 * force enable gfx ras and ignore the vbios gfx ras flag,
 3706 * since GC EDC cannot be written
3707 */
e509965e 3708static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
f50160cf
SY
3709{
3710 struct atom_context *ctx = adev->mode_info.atom_context;
3711
3712 if (!ctx)
3713 return;
3714
adf64e21
ML
3715 if (strnstr(ctx->vbios_pn, "D16406",
3716 sizeof(ctx->vbios_pn)) ||
3717 strnstr(ctx->vbios_pn, "D36002",
3718 sizeof(ctx->vbios_pn)))
8ab0d6f0 3719 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
f50160cf
SY
3720}
3721
4e2965bd
HZ
 3722/* Query ras capability via atomfirmware interface */
3723static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
3724{
3725 /* mem_ecc cap */
3726 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
3727 dev_info(adev->dev, "MEM ECC is active.\n");
3728 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
3729 1 << AMDGPU_RAS_BLOCK__DF);
3730 } else {
 3731 dev_info(adev->dev, "MEM ECC is not present.\n");
3732 }
3733
3734 /* sram_ecc cap */
3735 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
3736 dev_info(adev->dev, "SRAM ECC is active.\n");
3737 if (!amdgpu_sriov_vf(adev))
3738 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
3739 1 << AMDGPU_RAS_BLOCK__DF);
3740 else
3741 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
3742 1 << AMDGPU_RAS_BLOCK__SDMA |
3743 1 << AMDGPU_RAS_BLOCK__GFX);
3744
3745 /*
 3746 * VCN/JPEG RAS can be supported in both bare-metal and
 3747 * SRIOV environments
3748 */
3749 if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
3750 amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
b758667f
MG
3751 amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3) ||
3752 amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(5, 0, 1))
4e2965bd
HZ
3753 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
3754 1 << AMDGPU_RAS_BLOCK__JPEG);
3755 else
3756 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
3757 1 << AMDGPU_RAS_BLOCK__JPEG);
3758
3759 /*
3760 * XGMI RAS is not supported if xgmi num physical nodes
3761 * is zero
3762 */
3763 if (!adev->gmc.xgmi.num_physical_nodes)
3764 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
3765 } else {
 3766 dev_info(adev->dev, "SRAM ECC is not present.\n");
3767 }
3768}
3769
3770/* Query poison mode from umc/df IP callbacks */
3771static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
3772{
3773 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3774 bool df_poison, umc_poison;
3775
3776 /* poison setting is useless on SRIOV guest */
3777 if (amdgpu_sriov_vf(adev) || !con)
3778 return;
3779
3780 /* Init poison supported flag, the default value is false */
3781 if (adev->gmc.xgmi.connected_to_cpu ||
3782 adev->gmc.is_app_apu) {
3783 /* enabled by default when GPU is connected to CPU */
3784 con->poison_supported = true;
3785 } else if (adev->df.funcs &&
3786 adev->df.funcs->query_ras_poison_mode &&
3787 adev->umc.ras &&
3788 adev->umc.ras->query_ras_poison_mode) {
3789 df_poison =
3790 adev->df.funcs->query_ras_poison_mode(adev);
3791 umc_poison =
3792 adev->umc.ras->query_ras_poison_mode(adev);
3793
 3794 /* Only if poison is set in both DF and UMC can we support it */
3795 if (df_poison && umc_poison)
3796 con->poison_supported = true;
3797 else if (df_poison != umc_poison)
3798 dev_warn(adev->dev,
3799 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
3800 df_poison, umc_poison);
3801 }
3802}
3803
5caf466a 3804/*
 3805 * check the hardware's ras ability, which is saved in hw_supported.
 3806 * if the hardware does not support ras, we can skip some ras initialization
 3807 * and forbid some ras operations from IP blocks.
 3808 * if software itself (say, a boot parameter) limits the ras ability, we
 3809 * still need to allow IP blocks to do some limited operations, like
 3810 * disable. in such a case, we have to initialize ras as normal, but check
 3811 * in each function whether the operation is allowed.
3812 */
e509965e 3813static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
c030f2e4 3814{
8ab0d6f0 3815 adev->ras_hw_enabled = adev->ras_enabled = 0;
c030f2e4 3816
38298ce6 3817 if (!amdgpu_ras_asic_supported(adev))
5caf466a 3818 return;
b404ae82 3819
907fec2d
VS
3820 if (amdgpu_sriov_vf(adev)) {
3821 if (amdgpu_virt_get_ras_capability(adev))
3822 goto init_ras_enabled_flag;
3823 }
3824
4e2965bd
HZ
3825 /* query ras capability from psp */
3826 if (amdgpu_psp_get_ras_capability(&adev->psp))
3827 goto init_ras_enabled_flag;
58bc2a9c 3828
4e2965bd
HZ
 3829 /* query ras capability from vbios */
3830 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
3831 amdgpu_ras_query_ras_capablity_from_vbios(adev);
75f06251
HZ
3832 } else {
 3833 /* driver only manages the RAS features of a few IP blocks
 3834 * when the GPU is connected to the CPU through XGMI */
8ab0d6f0 3835 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
e509965e
LT
3836 1 << AMDGPU_RAS_BLOCK__SDMA |
3837 1 << AMDGPU_RAS_BLOCK__MMHUB);
75f06251 3838 }
88474cca 3839
4e2965bd 3840 /* apply asic specific settings (vega20 only for now) */
e509965e 3841 amdgpu_ras_get_quirks(adev);
f50160cf 3842
4e2965bd
HZ
3843 /* query poison mode from umc/df ip callback */
3844 amdgpu_ras_query_poison_mode(adev);
3845
3846init_ras_enabled_flag:
88474cca 3847 /* hw_supported needs to be aligned with RAS block mask. */
8ab0d6f0 3848 adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
b404ae82 3849
66d64e4e
SY
3850 adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
3851 adev->ras_hw_enabled & amdgpu_ras_mask;
04c4fcd2 3852
13c13bdd 3853 /* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */
3394069e
VS
3854 if (!amdgpu_sriov_vf(adev)) {
3855 adev->aca.is_enabled =
3856 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
3857 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
3858 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
3859 }
c389a060
TZ
3860
 3861 /* bad page feature is not applicable to the specific app APU platform */
3862 if (adev->gmc.is_app_apu &&
3863 amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0))
3864 amdgpu_bad_page_threshold = 0;
c030f2e4 3865}
3866
05adfd80
LT
3867static void amdgpu_ras_counte_dw(struct work_struct *work)
3868{
3869 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3870 ras_counte_delay_work.work);
3871 struct amdgpu_device *adev = con->adev;
a3fbb0d8 3872 struct drm_device *dev = adev_to_drm(adev);
05adfd80
LT
3873 unsigned long ce_count, ue_count;
3874 int res;
3875
3876 res = pm_runtime_get_sync(dev->dev);
3877 if (res < 0)
3878 goto Out;
3879
3880 /* Cache new values.
3881 */
4a1c9a44 3882 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
4d9f771e
LT
3883 atomic_set(&con->ras_ce_count, ce_count);
3884 atomic_set(&con->ras_ue_count, ue_count);
3885 }
05adfd80
LT
3886
3887 pm_runtime_mark_last_busy(dev->dev);
3888Out:
3889 pm_runtime_put_autosuspend(dev->dev);
3890}
3891
625e5f38
AK
3892static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
3893{
 3894 return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
3895 AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
3896 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
3897 AMDGPU_RAS_ERROR__PARITY;
3898}
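/*
 * Editor's note: the parentheses added above matter -- '|' binds tighter
 * than '?:' in C, so without them the expression parses as
 * "poison ? POISON : (0 | CORRECTABLE | UNCORRECTABLE | PARITY)" and a
 * poison-capable device would advertise the POISON bit alone.
 */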
3899
9dc57c2a
YW
3900static void ras_event_mgr_init(struct ras_event_manager *mgr)
3901{
59f488be 3902 struct ras_event_state *event_state;
9dc57c2a
YW
3903 int i;
3904
75ac6a25
YW
3905 memset(mgr, 0, sizeof(*mgr));
3906 atomic64_set(&mgr->seqno, 0);
3907
59f488be
YW
3908 for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
3909 event_state = &mgr->event_state[i];
3910 event_state->last_seqno = RAS_EVENT_INVALID_ID;
3911 atomic64_set(&event_state->count, 0);
3912 }
9dc57c2a
YW
3913}
3914
3915static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
3916{
3917 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3918 struct amdgpu_hive_info *hive;
3919
3920 if (!ras)
3921 return;
3922
3923 hive = amdgpu_get_xgmi_hive(adev);
3924 ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
3925
3926 /* init event manager with node 0 on xgmi system */
e283f4fb 3927 if (!amdgpu_reset_in_recovery(adev)) {
9dc57c2a
YW
3928 if (!hive || adev->gmc.xgmi.node_id == 0)
3929 ras_event_mgr_init(ras->event_mgr);
3930 }
3931
3932 if (hive)
3933 amdgpu_put_xgmi_hive(hive);
3934}
3935
473af28d
HZ
3936static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
3937{
3938 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3939
3940 if (!con || (adev->flags & AMD_IS_APU))
3941 return;
3942
3943 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3944 case IP_VERSION(13, 0, 2):
3945 case IP_VERSION(13, 0, 6):
9a826c4a 3946 case IP_VERSION(13, 0, 12):
16b85a09
HZ
3947 con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT;
3948 break;
473af28d 3949 case IP_VERSION(13, 0, 14):
16b85a09 3950 con->reserved_pages_in_bytes = (AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT << 1);
473af28d
HZ
3951 break;
3952 default:
3953 break;
3954 }
3955}
3956
c030f2e4 3957int amdgpu_ras_init(struct amdgpu_device *adev)
3958{
3959 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4e644fff 3960 int r;
c030f2e4 3961
b404ae82 3962 if (con)
c030f2e4 3963 return 0;
3964
091411be 3965 con = kzalloc(sizeof(*con) +
640ae42e
JC
3966 sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
3967 sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
091411be 3968 GFP_KERNEL);
c030f2e4 3969 if (!con)
3970 return -ENOMEM;
3971
05adfd80
LT
3972 con->adev = adev;
3973 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
3974 atomic_set(&con->ras_ce_count, 0);
3975 atomic_set(&con->ras_ue_count, 0);
3976
c030f2e4 3977 con->objs = (struct ras_manager *)(con + 1);
3978
3979 amdgpu_ras_set_context(adev, con);
3980
e509965e
LT
3981 amdgpu_ras_check_supported(adev);
3982
7ddd9770 3983 if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
970fd197
SY
 3984 /* set the gfx block ras context feature for VEGA20 Gaming;
 3985 * send the ras disable cmd to the ras ta during ras late init.
3986 */
8ab0d6f0 3987 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
970fd197
SY
3988 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
3989
3990 return 0;
3991 }
3992
5e91160a 3993 r = 0;
5436ab94 3994 goto release_con;
fb2a3607
HZ
3995 }
3996
69691c82 3997 con->update_channel_flag = false;
c030f2e4 3998 con->features = 0;
625e5f38 3999 con->schema = 0;
c030f2e4 4000 INIT_LIST_HEAD(&con->head);
108c6a63 4001 /* Might need get this flag from vbios. */
4002 con->flags = RAS_DEFAULT_FLAGS;
c030f2e4 4003
6e36f231
HZ
 4004 /* initialize the nbio ras function ahead of any other
 4005 * ras functions so the hardware fatal error interrupt
 4006 * can be enabled as early as possible */
4e8303cf 4007 switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
fdc94d3a
HZ
4008 case IP_VERSION(7, 4, 0):
4009 case IP_VERSION(7, 4, 1):
4010 case IP_VERSION(7, 4, 4):
4011 if (!adev->gmc.xgmi.connected_to_cpu)
2e54fe5d 4012 adev->nbio.ras = &nbio_v7_4_ras;
6e36f231 4013 break;
9af357bc
HZ
4014 case IP_VERSION(4, 3, 0):
4015 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
 4016 /* unlike other generations of nbio ras,
 4017 * nbio v4_3 only supports the fatal error interrupt,
 4018 * which informs software that DF is frozen due to a
 4019 * system fatal error event. driver should not
 4020 * enable nbio ras in such a case. Instead,
 4021 * check DF RAS */
4022 adev->nbio.ras = &nbio_v4_3_ras;
4023 break;
ecd1191e
CL
4024 case IP_VERSION(6, 3, 1):
4025 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
 4026 /* unlike other generations of nbio ras,
 4027 * nbif v6_3_1 only supports the fatal error interrupt,
 4028 * which informs software that DF is frozen due to a
 4029 * system fatal error event. driver should not
 4030 * enable nbio ras in such a case. Instead,
 4031 * check DF RAS
4032 */
4033 adev->nbio.ras = &nbif_v6_3_1_ras;
4034 break;
7692e1ee 4035 case IP_VERSION(7, 9, 0):
9a826c4a 4036 case IP_VERSION(7, 9, 1):
7692e1ee
TZ
4037 if (!adev->gmc.is_app_apu)
4038 adev->nbio.ras = &nbio_v7_9_ras;
4039 break;
6e36f231
HZ
4040 default:
4041 /* nbio ras is not available */
4042 break;
4043 }
4044
fdc94d3a
HZ
4045 /* nbio ras block needs to be enabled ahead of other ras blocks
 4046 * to handle fatal errors */
4047 r = amdgpu_nbio_ras_sw_init(adev);
4048 if (r)
4049 return r;
4050
2e54fe5d 4051 if (adev->nbio.ras &&
4052 adev->nbio.ras->init_ras_controller_interrupt) {
4053 r = adev->nbio.ras->init_ras_controller_interrupt(adev);
4e644fff 4054 if (r)
5436ab94 4055 goto release_con;
4e644fff
HZ
4056 }
4057
2e54fe5d 4058 if (adev->nbio.ras &&
4059 adev->nbio.ras->init_ras_err_event_athub_interrupt) {
4060 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
4e644fff 4061 if (r)
5436ab94 4062 goto release_con;
4e644fff
HZ
4063 }
4064
73cb81dc
HZ
 4065 /* Pack socket_id into ras feature mask bits[31:29] */
4066 if (adev->smuio.funcs &&
4067 adev->smuio.funcs->get_socket_id)
ee9c3031
SY
4068 con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
4069 AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
73cb81dc 4070
625e5f38
AK
4071 /* Get RAS schema for particular SOC */
4072 con->schema = amdgpu_get_ras_schema(adev);
4073
473af28d
HZ
4074 amdgpu_ras_init_reserved_vram_size(adev);
4075
5e91160a
GC
4076 if (amdgpu_ras_fs_init(adev)) {
4077 r = -EINVAL;
5436ab94 4078 goto release_con;
5e91160a 4079 }
c030f2e4 4080
9817f061
YW
4081 if (amdgpu_ras_aca_is_supported(adev)) {
4082 if (amdgpu_aca_is_enabled(adev))
4083 r = amdgpu_aca_init(adev);
4084 else
4085 r = amdgpu_mca_init(adev);
4086 if (r)
4087 goto release_con;
4088 }
4089
6952e99c 4090 dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
e509965e 4091 "hardware ability[%x] ras_mask[%x]\n",
8ab0d6f0 4092 adev->ras_hw_enabled, adev->ras_enabled);
e509965e 4093
c030f2e4 4094 return 0;
5436ab94 4095release_con:
c030f2e4 4096 amdgpu_ras_set_context(adev, NULL);
4097 kfree(con);
4098
5e91160a 4099 return r;
c030f2e4 4100}
4101
8f6368a9 4102int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
134d16d5 4103{
8107e499
HZ
4104 if (adev->gmc.xgmi.connected_to_cpu ||
4105 adev->gmc.is_app_apu)
134d16d5
JC
4106 return 1;
4107 return 0;
4108}
4109
4110static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
4111 struct ras_common_if *ras_block)
4112{
4113 struct ras_query_if info = {
4114 .head = *ras_block,
4115 };
4116
4117 if (!amdgpu_persistent_edc_harvesting_supported(adev))
4118 return 0;
4119
4120 if (amdgpu_ras_query_error_status(adev, &info) != 0)
4121 DRM_WARN("RAS init harvest failure");
4122
4123 if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
4124 DRM_WARN("RAS init harvest reset failure");
4125
4126 return 0;
4127}
4128
e4348849
TZ
4129bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
4130{
4131 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4132
4133 if (!con)
4134 return false;
4135
4136 return con->poison_supported;
4137}
4138
b293e891 4139/* helper function to handle common stuff in ip late init phase */
563285c8 4140int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
4141 struct ras_common_if *ras_block)
b293e891 4142{
29c9b6cd 4143 struct amdgpu_ras_block_object *ras_obj = NULL;
05adfd80 4144 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4a1c9a44 4145 struct ras_query_if *query_info;
05adfd80 4146 unsigned long ue_count, ce_count;
b293e891
HZ
4147 int r;
4148
4149 /* disable RAS feature per IP block if it is not supported */
4150 if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
4151 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
4152 return 0;
4153 }
4154
4155 r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
4156 if (r) {
e283f4fb 4157 if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) {
b293e891
HZ
 4158 /* in the resume phase, if we fail to enable ras,
 4159 * clean up all ras fs nodes, and disable ras */
4160 goto cleanup;
4161 } else
4162 return r;
4163 }
4164
134d16d5
JC
 4165 /* check for errors on warm reset for ASICs with persistent EDC harvesting */
4166 amdgpu_persistent_edc_harvesting(adev, ras_block);
4167
b293e891 4168 /* in resume phase, no need to create ras fs node */
e283f4fb 4169 if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
b293e891
HZ
4170 return 0;
4171
563285c8 4172 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
36780606
TZ
4173 if (ras_obj->ras_cb || (ras_obj->hw_ops &&
4174 (ras_obj->hw_ops->query_poison_status ||
4175 ras_obj->hw_ops->handle_poison_consumption))) {
9252d33d 4176 r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
b293e891 4177 if (r)
779596ce 4178 goto cleanup;
b293e891
HZ
4179 }
4180
f957138c
HZ
4181 if (ras_obj->hw_ops &&
4182 (ras_obj->hw_ops->query_ras_error_count ||
4183 ras_obj->hw_ops->query_ras_error_status)) {
4184 r = amdgpu_ras_sysfs_create(adev, ras_block);
4185 if (r)
4186 goto interrupt;
b293e891 4187
f957138c
HZ
4188 /* Those are the cached values at init.
4189 */
4190 query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
4191 if (!query_info)
4192 return -ENOMEM;
4193 memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
4a1c9a44 4194
f957138c
HZ
4195 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
4196 atomic_set(&con->ras_ce_count, ce_count);
4197 atomic_set(&con->ras_ue_count, ue_count);
4198 }
4199
4200 kfree(query_info);
4d9f771e 4201 }
05adfd80 4202
b293e891 4203 return 0;
779596ce
TR
4204
4205interrupt:
563285c8 4206 if (ras_obj->ras_cb)
9252d33d 4207 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
779596ce 4208cleanup:
b293e891
HZ
4209 amdgpu_ras_feature_enable(adev, ras_block, 0);
4210 return r;
4211}
4212
d41ff22a 4213static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
418abce2 4214 struct ras_common_if *ras_block)
4215{
4216 return amdgpu_ras_block_late_init(adev, ras_block);
4217}
4218
b293e891 4219/* helper function to remove ras fs node and interrupt handler */
bdb3489c 4220void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
4221 struct ras_common_if *ras_block)
4222{
563285c8 4223 struct amdgpu_ras_block_object *ras_obj;
bdb3489c 4224 if (!ras_block)
4225 return;
4226
563285c8 4227 amdgpu_ras_sysfs_remove(adev, ras_block);
bdb3489c 4228
563285c8 4229 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
4230 if (ras_obj->ras_cb)
4231 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
bdb3489c 4232}
4233
80e0c2cb 4234static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
4235 struct ras_common_if *ras_block)
4236{
4237 return amdgpu_ras_block_late_fini(adev, ras_block);
4238}
4239
a564808e 4240/* do some init work after IP late init, as a dependency.
511fdbc3 4241 * it runs in the resume/gpu reset/boot-up cases.
a564808e 4242 */
511fdbc3 4243void amdgpu_ras_resume(struct amdgpu_device *adev)
108c6a63 4244{
4245 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4246 struct ras_manager *obj, *tmp;
4247
8ab0d6f0 4248 if (!adev->ras_enabled || !con) {
970fd197
SY
 4249 /* clean ras context for VEGA20 Gaming after sending the ras disable cmd */
4250 amdgpu_release_ras_context(adev);
4251
108c6a63 4252 return;
970fd197 4253 }
108c6a63 4254
108c6a63 4255 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
191051a1 4256 /* Set up all other IPs which are not implemented. The tricky
 4257 * part is that an IP's actual ras error type should be
 4258 * MULTI_UNCORRECTABLE, but since the driver does not handle it,
 4259 * ERROR_NONE makes sense anyway.
4260 */
4261 amdgpu_ras_enable_all_features(adev, 1);
4262
 4263 /* We enable ras on all hw_supported blocks, but the boot
 4264 * parameter might disable some of them, and one or more IPs may
 4265 * not be implemented yet. So we disable those on their behalf.
4266 */
108c6a63 4267 list_for_each_entry_safe(obj, tmp, &con->head, node) {
4268 if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
4269 amdgpu_ras_feature_enable(adev, &obj->head, 0);
 4270 /* there should not be any reference. */
4271 WARN_ON(alive_obj(obj));
4272 }
191051a1 4273 }
108c6a63 4274 }
4275}
4276
511fdbc3 4277void amdgpu_ras_suspend(struct amdgpu_device *adev)
4278{
4279 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4280
8ab0d6f0 4281 if (!adev->ras_enabled || !con)
511fdbc3 4282 return;
4283
4284 amdgpu_ras_disable_all_features(adev, 0);
4285 /* Make sure all ras objects are disabled. */
ee9c3031 4286 if (AMDGPU_RAS_GET_FEATURES(con->features))
511fdbc3 4287 amdgpu_ras_disable_all_features(adev, 1);
4288}
4289
867e24ca 4290int amdgpu_ras_late_init(struct amdgpu_device *adev)
4291{
4292 struct amdgpu_ras_block_list *node, *tmp;
4293 struct amdgpu_ras_block_object *obj;
4294 int r;
4295
9dc57c2a
YW
4296 amdgpu_ras_event_mgr_init(adev);
4297
9817f061 4298 if (amdgpu_ras_aca_is_supported(adev)) {
e283f4fb 4299 if (amdgpu_reset_in_recovery(adev)) {
9817f061
YW
4300 if (amdgpu_aca_is_enabled(adev))
4301 r = amdgpu_aca_reset(adev);
4302 else
4303 r = amdgpu_mca_reset(adev);
062a7ce6
YW
4304 if (r)
4305 return r;
4306 }
c0c48f0d 4307
9817f061
YW
4308 if (!amdgpu_sriov_vf(adev)) {
4309 if (amdgpu_aca_is_enabled(adev))
4310 amdgpu_ras_set_aca_debug_mode(adev, false);
4311 else
4312 amdgpu_ras_set_mca_debug_mode(adev, false);
4313 }
c0c48f0d 4314 }
201761b5 4315
329cec8f 4316 /* Guest side doesn't need to init the ras feature */
84a2947e 4317 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev))
329cec8f
YW
4318 return 0;
4319
867e24ca 4320 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
2866a454
YW
4321 obj = node->ras_obj;
4322 if (!obj) {
867e24ca 4323 dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
4324 continue;
4325 }
418abce2 4326
2866a454
YW
4327 if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
4328 continue;
4329
867e24ca 4330 if (obj->ras_late_init) {
4331 r = obj->ras_late_init(adev, &obj->ras_comm);
4332 if (r) {
4333 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
4334 obj->ras_comm.name, r);
4335 return r;
4336 }
418abce2 4337 } else
4338 amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
867e24ca 4339 }
4340
4341 return 0;
4342}
4343
c030f2e4 4344/* do some fini work before IP fini as dependence */
4345int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
4346{
4347 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4348
8ab0d6f0 4349 if (!adev->ras_enabled || !con)
c030f2e4 4350 return 0;
4351
72c8c97b 4352
c030f2e4 4353 /* Need to disable ras on all IPs here before ip [hw/sw]fini */
ee9c3031 4354 if (AMDGPU_RAS_GET_FEATURES(con->features))
642c0401 4355 amdgpu_ras_disable_all_features(adev, 0);
c030f2e4 4356 amdgpu_ras_recovery_fini(adev);
4357 return 0;
4358}
4359
4360int amdgpu_ras_fini(struct amdgpu_device *adev)
4361{
d5e8ff5f 4362 struct amdgpu_ras_block_list *ras_node, *tmp;
1f211a82 4363 struct amdgpu_ras_block_object *obj = NULL;
c030f2e4 4364 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4365
8ab0d6f0 4366 if (!adev->ras_enabled || !con)
c030f2e4 4367 return 0;
4368
1f211a82 4369 list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
4370 if (ras_node->ras_obj) {
4371 obj = ras_node->ras_obj;
4372 if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
4373 obj->ras_fini)
4374 obj->ras_fini(adev, &obj->ras_comm);
80e0c2cb 4375 else
4376 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
1f211a82 4377 }
4378
4379 /* Clear ras blocks from ras_list and free ras block list node */
4380 list_del(&ras_node->node);
4381 kfree(ras_node);
4382 }
4383
c030f2e4 4384 amdgpu_ras_fs_fini(adev);
4385 amdgpu_ras_interrupt_remove_all(adev);
4386
9817f061
YW
4387 if (amdgpu_ras_aca_is_supported(adev)) {
4388 if (amdgpu_aca_is_enabled(adev))
4389 amdgpu_aca_fini(adev);
4390 else
4391 amdgpu_mca_fini(adev);
4392 }
c0c48f0d 4393
ee9c3031 4394 WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
c030f2e4 4395
ee9c3031 4396 if (AMDGPU_RAS_GET_FEATURES(con->features))
edfdde90 4397 amdgpu_ras_disable_all_features(adev, 0);
c030f2e4 4398
05adfd80
LT
4399 cancel_delayed_work_sync(&con->ras_counte_delay_work);
4400
c030f2e4 4401 amdgpu_ras_set_context(adev, NULL);
4402 kfree(con);
4403
4404 return 0;
4405}
7c6e68c7 4406
1b6ef74b
LL
4407bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
4408{
4409 struct amdgpu_ras *ras;
4410
4411 ras = amdgpu_ras_get_context(adev);
4412 if (!ras)
4413 return false;
4414
e1ee2111 4415 return test_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
1b6ef74b
LL
4416}
4417
4418void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
4419{
4420 struct amdgpu_ras *ras;
4421
e1ee2111
LL
4422 ras = amdgpu_ras_get_context(adev);
4423 if (ras) {
4424 if (status)
4425 set_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4426 else
4427 clear_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4428 }
4429}
4430
4431void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
4432{
4433 struct amdgpu_ras *ras;
4434
1b6ef74b 4435 ras = amdgpu_ras_get_context(adev);
3bdf8dd8 4436 if (ras) {
e1ee2111 4437 ras->ras_err_state = 0;
3bdf8dd8
LL
4438 ras->gpu_reset_flags = 0;
4439 }
e1ee2111
LL
4440}
4441
4442void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
4443 enum amdgpu_ras_block block)
4444{
4445 struct amdgpu_ras *ras;
4446
4447 ras = amdgpu_ras_get_context(adev);
4448 if (ras)
4449 set_bit(block, &ras->ras_err_state);
4450}
4451
4452bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block)
4453{
4454 struct amdgpu_ras *ras;
4455
4456 ras = amdgpu_ras_get_context(adev);
4457 if (ras) {
4458 if (block == AMDGPU_RAS_BLOCK__ANY)
4459 return (ras->ras_err_state != 0);
4460 else
4461 return test_bit(block, &ras->ras_err_state) ||
4462 test_bit(AMDGPU_RAS_BLOCK__LAST,
4463 &ras->ras_err_state);
4464 }
4465
4466 return false;
1b6ef74b
LL
4467}
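/*
 * Editor's usage sketch (not driver code) for the per-block error state
 * kept above: producers mark a block as poisoned, and consumers test the
 * block bit (or AMDGPU_RAS_BLOCK__LAST, the global fatal-error bit set by
 * amdgpu_ras_set_fed()) before trusting hardware state:
 */
#if 0
	amdgpu_ras_set_err_poison(adev, AMDGPU_RAS_BLOCK__GFX);

	if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__GFX)) {
		/* GFX consumed poison, or a fatal error is pending */
	}
#endif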
4468
75ac6a25 4469static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
9dc57c2a 4470{
75ac6a25
YW
4471 struct amdgpu_ras *ras;
4472
4473 ras = amdgpu_ras_get_context(adev);
4474 if (!ras)
4475 return NULL;
4476
4477 return ras->event_mgr;
4478}
4479
4480int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
4481 const void *caller)
4482{
4483 struct ras_event_manager *event_mgr;
59f488be 4484 struct ras_event_state *event_state;
75ac6a25
YW
4485 int ret = 0;
4486
4487 if (type >= RAS_EVENT_TYPE_COUNT) {
4488 ret = -EINVAL;
4489 goto out;
4490 }
4491
4492 event_mgr = __get_ras_event_mgr(adev);
4493 if (!event_mgr) {
4494 ret = -EINVAL;
4495 goto out;
4496 }
4497
59f488be
YW
4498 event_state = &event_mgr->event_state[type];
4499 event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno);
4500 atomic64_inc(&event_state->count);
75ac6a25
YW
4501
4502out:
4503 if (ret && caller)
4504 dev_warn(adev->dev, "failed mark ras event (%d) in %ps, ret:%d\n",
4505 (int)type, caller, ret);
4506
4507 return ret;
9dc57c2a
YW
4508}
4509
4510u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
4511{
75ac6a25 4512 struct ras_event_manager *event_mgr;
9dc57c2a
YW
4513 u64 id;
4514
75ac6a25
YW
4515 if (type >= RAS_EVENT_TYPE_COUNT)
4516 return RAS_EVENT_INVALID_ID;
4517
9dc57c2a 4518 switch (type) {
75ac6a25 4519 case RAS_EVENT_TYPE_FATAL:
5b9de259 4520 case RAS_EVENT_TYPE_POISON_CREATION:
12b435a4 4521 case RAS_EVENT_TYPE_POISON_CONSUMPTION:
75ac6a25
YW
4522 event_mgr = __get_ras_event_mgr(adev);
4523 if (!event_mgr)
4524 return RAS_EVENT_INVALID_ID;
4525
59f488be 4526 id = event_mgr->event_state[type].last_seqno;
9dc57c2a
YW
4527 break;
4528 case RAS_EVENT_TYPE_INVALID:
4529 default:
75ac6a25 4530 id = RAS_EVENT_INVALID_ID;
9dc57c2a
YW
4531 break;
4532 }
4533
4534 return id;
4535}
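/*
 * Editor's usage sketch (not driver code): a reporter first marks the
 * event type, which allocates a fresh hive-wide seqno, then fetches the
 * id to tag its log lines -- the same pattern amdgpu_ras_global_ras_isr()
 * uses below:
 */
#if 0
	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
	u64 event_id;

	if (!amdgpu_ras_mark_ras_event(adev, type)) {
		event_id = amdgpu_ras_acquire_event_id(adev, type);
		RAS_EVENT_LOG(adev, event_id, "example message\n");
	}
#endif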
4536
7c6e68c7
AG
4537void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
4538{
4539 if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
2c7cd280 4540 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
75ac6a25
YW
4541 enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
4542 u64 event_id;
4543
937467b7
LL
4544 if (amdgpu_ras_mark_ras_event(adev, type)) {
4545 dev_err(adev->dev,
4546 "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n");
75ac6a25 4547 return;
937467b7 4548 }
75ac6a25
YW
4549
4550 event_id = amdgpu_ras_acquire_event_id(adev, type);
2c7cd280 4551
9dc57c2a
YW
 4552 RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error "
 4553 "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
d5ea093e 4554
b41f742d 4555 amdgpu_ras_set_fed(adev, true);
2c7cd280 4556 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
61934624 4557 amdgpu_ras_reset_gpu(adev);
7c6e68c7
AG
4558 }
4559}
bb5c7235
WS
4560
4561bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
4562{
4563 if (adev->asic_type == CHIP_VEGA20 &&
4564 adev->pm.fw_version <= 0x283400) {
4565 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
4566 amdgpu_ras_intr_triggered();
4567 }
4568
4569 return false;
4570}
970fd197
SY
4571
4572void amdgpu_release_ras_context(struct amdgpu_device *adev)
4573{
4574 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4575
4576 if (!con)
4577 return;
4578
8ab0d6f0 4579 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
970fd197
SY
4580 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
4581 amdgpu_ras_set_context(adev, NULL);
4582 kfree(con);
4583 }
4584}
12b2cab7
MJ
4585
4586#ifdef CONFIG_X86_MCE_AMD
4587static struct amdgpu_device *find_adev(uint32_t node_id)
4588{
12b2cab7
MJ
4589 int i;
4590 struct amdgpu_device *adev = NULL;
4591
91a1a52d
MJ
4592 for (i = 0; i < mce_adev_list.num_gpu; i++) {
4593 adev = mce_adev_list.devs[i];
12b2cab7 4594
91a1a52d 4595 if (adev && adev->gmc.xgmi.connected_to_cpu &&
12b2cab7
MJ
4596 adev->gmc.xgmi.physical_node_id == node_id)
4597 break;
4598 adev = NULL;
4599 }
4600
12b2cab7
MJ
4601 return adev;
4602}
4603
4604#define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF)
4605#define GET_UMC_INST(m) (((m) >> 21) & 0x7)
4606#define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
4607#define GPU_ID_OFFSET 8
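/*
 * Editor's worked example of the field extraction above, for a
 * hypothetical m->ipid = 0x0000A00000243000:
 *	GET_MCA_IPID_GPUID: (ipid >> 44) & 0xF = 0xA, so
 *	                    gpu_id = 0xA - GPU_ID_OFFSET = 2
 *	GET_UMC_INST:       (ipid >> 21) & 0x7 = 1
 *	GET_CHAN_INDEX:     ((ipid >> 12) & 0x3) | ((ipid >> 18) & 0x4)
 *	                    = 3 | 0 = 3
 */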
4608
4609static int amdgpu_bad_page_notifier(struct notifier_block *nb,
4610 unsigned long val, void *data)
4611{
4612 struct mce *m = (struct mce *)data;
4613 struct amdgpu_device *adev = NULL;
4614 uint32_t gpu_id = 0;
cd4c99f1 4615 uint32_t umc_inst = 0, ch_inst = 0;
12b2cab7
MJ
4616
4617 /*
 4618 * Only process the error if it was generated in UMC_V2, which
 4619 * belongs to GPU UMCs, and occurred in DramECC (Extended error
 4620 * code = 0); otherwise bail out.
4621 */
91f75eb4 4622 if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
12b2cab7
MJ
4623 (XEC(m->status, 0x3f) == 0x0)))
4624 return NOTIFY_DONE;
4625
4626 /*
 4627 * If it is a correctable error, return.
4628 */
4629 if (mce_is_correctable(m))
4630 return NOTIFY_OK;
4631
4632 /*
4633 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
4634 */
4635 gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
4636
4637 adev = find_adev(gpu_id);
4638 if (!adev) {
4639 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
4640 gpu_id);
4641 return NOTIFY_DONE;
4642 }
4643
4644 /*
 4645 * If it is an uncorrectable error, then find out the UMC instance and
4646 * channel index.
4647 */
4648 umc_inst = GET_UMC_INST(m->ipid);
4649 ch_inst = GET_CHAN_INDEX(m->ipid);
4650
4651 dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
4652 umc_inst, ch_inst);
4653
24b82292
TZ
4654 if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
4655 return NOTIFY_OK;
4656 else
6c0ca748 4657 return NOTIFY_DONE;
12b2cab7
MJ
4658}
4659
4660static struct notifier_block amdgpu_bad_page_nb = {
4661 .notifier_call = amdgpu_bad_page_notifier,
4662 .priority = MCE_PRIO_UC,
4663};
4664
91a1a52d 4665static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
12b2cab7 4666{
91a1a52d
MJ
4667 /*
4668 * Add the adev to the mce_adev_list.
4669 * During mode2 reset, amdgpu device is temporarily
4670 * removed from the mgpu_info list which can cause
4671 * page retirement to fail.
4672 * Use this list instead of mgpu_info to find the amdgpu
4673 * device on which the UMC error was reported.
4674 */
4675 mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
4676
12b2cab7
MJ
4677 /*
4678 * Register the x86 notifier only once
4679 * with MCE subsystem.
4680 */
4681 if (notifier_registered == false) {
4682 mce_register_decode_chain(&amdgpu_bad_page_nb);
4683 notifier_registered = true;
4684 }
4685}
4686#endif
7cab2124 4687
b6efdb02 4688struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
7cab2124 4689{
4690 if (!adev)
4691 return NULL;
4692
4693 return adev->psp.ras_context.ras;
4694}
4695
b6efdb02 4696int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
7cab2124 4697{
4698 if (!adev)
69f91d32 4699 return -EINVAL;
7cab2124 4700
4701 adev->psp.ras_context.ras = ras_con;
4702 return 0;
4703}
4704
4705/* check if ras is supported on block, say, sdma, gfx */
4706int amdgpu_ras_is_supported(struct amdgpu_device *adev,
4707 unsigned int block)
4708{
8f453c51 4709 int ret = 0;
7cab2124 4710 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4711
4712 if (block >= AMDGPU_RAS_BLOCK_COUNT)
4713 return 0;
8f453c51
YC
4714
4715 ret = ras && (adev->ras_enabled & (1 << block));
4716
 4717 /* For the special asic with mem ecc enabled but sram ecc
 4718 * not enabled, even if the ras block is not marked supported in
 4719 * .ras_enabled, the ras block can be considered to support the
 4720 * ras function as long as the asic supports poison mode and the
 4721 * ras block has a ras configuration.
4722 */
4723 if (!ret &&
bc0f8080
CL
4724 (block == AMDGPU_RAS_BLOCK__GFX ||
4725 block == AMDGPU_RAS_BLOCK__SDMA ||
4726 block == AMDGPU_RAS_BLOCK__VCN ||
4727 block == AMDGPU_RAS_BLOCK__JPEG) &&
7ec11c2f 4728 (amdgpu_ras_mask & (1 << block)) &&
8f453c51
YC
4729 amdgpu_ras_is_poison_mode_supported(adev) &&
4730 amdgpu_ras_get_ras_block(adev, block, 0))
4731 ret = 1;
4732
4733 return ret;
7cab2124 4734}
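/*
 * Editor's usage sketch (not driver code): callers gate block-specific
 * RAS work on this check, e.g.:
 */
#if 0
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
		/* safe to create sysfs nodes / enable RAS interrupts */
	}
#endif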
4735
4736int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
4737{
4738 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4739
5f7697bb 4740 /* mode1 is the only selection for RMA status */
792be2e2 4741 if (amdgpu_ras_is_rma(adev)) {
5f7697bb
TZ
4742 ras->gpu_reset_flags = 0;
4743 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4744 }
4745
9e0feb79
YC
4746 if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
4747 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
4748 int hive_ras_recovery = 0;
4749
4750 if (hive) {
4751 hive_ras_recovery = atomic_read(&hive->ras_recovery);
4752 amdgpu_put_xgmi_hive(hive);
4753 }
 4754 /* In the case of multiple GPUs, after one GPU has started
 4755 * resetting all GPUs on the hive, the other GPUs do not need to
 4756 * trigger a GPU reset again.
4757 */
4758 if (!hive_ras_recovery)
4759 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
4760 else
4761 atomic_set(&ras->in_recovery, 0);
4762 } else {
4763 flush_work(&ras->recovery_work);
25a2b22e 4764 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
9e0feb79
YC
4765 }
4766
7cab2124 4767 return 0;
4768}
4769
201761b5 4770int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
8096df76
TZ
4771{
4772 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
201761b5 4773 int ret = 0;
8096df76 4774
201761b5
LL
4775 if (con) {
4776 ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4777 if (!ret)
04c4fcd2 4778 con->is_aca_debug_mode = enable;
201761b5
LL
4779 }
4780
4781 return ret;
8096df76
TZ
4782}
4783
33dcda51
YW
4784int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
4785{
4786 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4787 int ret = 0;
4788
4789 if (con) {
04c4fcd2
YW
4790 if (amdgpu_aca_is_enabled(adev))
4791 ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
4792 else
4793 ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
33dcda51 4794 if (!ret)
04c4fcd2 4795 con->is_aca_debug_mode = enable;
33dcda51
YW
4796 }
4797
4798 return ret;
4799}
4800
04c4fcd2 4801bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
8096df76
TZ
4802{
4803 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
04c4fcd2 4804 const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
8096df76
TZ
4805 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4806
4807 if (!con)
4808 return false;
4809
04c4fcd2
YW
4810 if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
4811 (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
4812 return con->is_aca_debug_mode;
8096df76
TZ
4813 else
4814 return true;
4815}
7cab2124 4816
8cc0f566
HZ
4817bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
4818 unsigned int *error_query_mode)
4819{
4820 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4821 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
04c4fcd2 4822 const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
8cc0f566
HZ
4823
4824 if (!con) {
4825 *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
4826 return false;
4827 }
4828
84a2947e
VS
4829 if (amdgpu_sriov_vf(adev)) {
4830 *error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY;
4831 } else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) {
8cc0f566 4832 *error_query_mode =
04c4fcd2 4833 (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
84a2947e 4834 } else {
8cc0f566 4835 *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
84a2947e 4836 }
8cc0f566
HZ
4837
4838 return true;
4839}
4840
6492e1b0 4841/* Register each ip ras block into amdgpu ras */
4842int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
b6efdb02 4843 struct amdgpu_ras_block_object *ras_block_obj)
6492e1b0 4844{
d5e8ff5f 4845 struct amdgpu_ras_block_list *ras_node;
6492e1b0 4846 if (!adev || !ras_block_obj)
4847 return -EINVAL;
4848
d5e8ff5f 4849 ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
4850 if (!ras_node)
4851 return -ENOMEM;
4852
4853 INIT_LIST_HEAD(&ras_node->node);
4854 ras_node->ras_obj = ras_block_obj;
4855 list_add_tail(&ras_node->node, &adev->ras_list);
6492e1b0 4856
4857 return 0;
4858}
322a7e00
HZ
4859
4860void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
4861{
4862 if (!err_type_name)
4863 return;
4864
4865 switch (err_type) {
4866 case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
4867 sprintf(err_type_name, "correctable");
4868 break;
4869 case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
4870 sprintf(err_type_name, "uncorrectable");
4871 break;
4872 default:
4873 sprintf(err_type_name, "unknown");
4874 break;
4875 }
4876}
4877
4878bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
4879 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4880 uint32_t instance,
4881 uint32_t *memory_id)
4882{
4883 uint32_t err_status_lo_data, err_status_lo_offset;
4884
4885 if (!reg_entry)
4886 return false;
4887
4888 err_status_lo_offset =
4889 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4890 reg_entry->seg_lo, reg_entry->reg_lo);
4891 err_status_lo_data = RREG32(err_status_lo_offset);
4892
4893 if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
4894 !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
4895 return false;
4896
4897 *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
4898
4899 return true;
4900}
4901
4902bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
4903 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4904 uint32_t instance,
4905 unsigned long *err_cnt)
4906{
4907 uint32_t err_status_hi_data, err_status_hi_offset;
4908
4909 if (!reg_entry)
4910 return false;
4911
4912 err_status_hi_offset =
4913 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4914 reg_entry->seg_hi, reg_entry->reg_hi);
4915 err_status_hi_data = RREG32(err_status_hi_offset);
4916
4917 if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
4918 !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
9b337b7d
HZ
4919 /* keep the check here in case we need to refer to the result later */
4920 dev_dbg(adev->dev, "Invalid err_info field\n");
322a7e00
HZ
4921
4922 /* read err count */
4923 *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
4924
4925 return true;
4926}
4927
4928void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
4929 const struct amdgpu_ras_err_status_reg_entry *reg_list,
4930 uint32_t reg_list_size,
4931 const struct amdgpu_ras_memory_id_entry *mem_list,
4932 uint32_t mem_list_size,
4933 uint32_t instance,
4934 uint32_t err_type,
4935 unsigned long *err_count)
4936{
4937 uint32_t memory_id;
4938 unsigned long err_cnt;
4939 char err_type_name[16];
4940 uint32_t i, j;
4941
4942 for (i = 0; i < reg_list_size; i++) {
9b337b7d
HZ
4943 /* query memory_id from err_status_lo */
4944 if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
4945 instance, &memory_id))
4946 continue;
4947
322a7e00
HZ
4948 /* query err_cnt from err_status_hi */
4949 if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
4950 instance, &err_cnt) ||
4951 !err_cnt)
4952 continue;
4953
322a7e00
HZ
4954 *err_count += err_cnt;
4955
4956 /* log the errors */
4957 amdgpu_ras_get_error_type_name(err_type, err_type_name);
4958 if (!mem_list) {
4959 /* memory_list is not supported */
4960 dev_info(adev->dev,
4961 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
4962 err_cnt, err_type_name,
4963 reg_list[i].block_name,
4964 instance, memory_id);
4965 } else {
4966 for (j = 0; j < mem_list_size; j++) {
4967 if (memory_id == mem_list[j].memory_id) {
4968 dev_info(adev->dev,
4969 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
4970 err_cnt, err_type_name,
4971 reg_list[i].block_name,
4972 instance, mem_list[j].name);
4973 break;
4974 }
4975 }
4976 }
4977 }
4978}
e53a3250
HZ
4979
4980void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
4981 const struct amdgpu_ras_err_status_reg_entry *reg_list,
4982 uint32_t reg_list_size,
4983 uint32_t instance)
4984{
4985 uint32_t err_status_lo_offset, err_status_hi_offset;
4986 uint32_t i;
4987
4988 for (i = 0; i < reg_list_size; i++) {
4989 err_status_lo_offset =
4990 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4991 reg_list[i].seg_lo, reg_list[i].reg_lo);
4992 err_status_hi_offset =
4993 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4994 reg_list[i].seg_hi, reg_list[i].reg_hi);
4995 WREG32(err_status_lo_offset, 0);
4996 WREG32(err_status_hi_offset, 0);
4997 }
4998}
5b1270be
YW
4999
5000int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
5001{
5002 memset(err_data, 0, sizeof(*err_data));
5003
5004 INIT_LIST_HEAD(&err_data->err_node_list);
5005
5006 return 0;
5007}
5008
5009static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
5010{
5011 if (!err_node)
5012 return;
5013
5014 list_del(&err_node->node);
5015 kvfree(err_node);
5016}
5017
5018void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
5019{
5020 struct ras_err_node *err_node, *tmp;
5021
8a656611 5022 list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
5b1270be 5023 amdgpu_ras_error_node_release(err_node);
5b1270be
YW
5024}
5025
5026static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
5027 struct amdgpu_smuio_mcm_config_info *mcm_info)
5028{
5029 struct ras_err_node *err_node;
5030 struct amdgpu_smuio_mcm_config_info *ref_id;
5031
5032 if (!err_data || !mcm_info)
5033 return NULL;
5034
5035 for_each_ras_error(err_node, err_data) {
5036 ref_id = &err_node->err_info.mcm_info;
5b1270be 5037
53d4d779
YW
5038 if (mcm_info->socket_id == ref_id->socket_id &&
5039 mcm_info->die_id == ref_id->die_id)
5040 return err_node;
5b1270be
YW
5041 }
5042
5043 return NULL;
5044}
5045
5046static struct ras_err_node *amdgpu_ras_error_node_new(void)
5047{
5048 struct ras_err_node *err_node;
5049
5050 err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
5051 if (!err_node)
5052 return NULL;
5053
5054 INIT_LIST_HEAD(&err_node->node);
5055
5056 return err_node;
5057}
5058
dbf3850d
YW
5059static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
5060{
5061 struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
5062 struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
5063 struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
5064 struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
5065
5066 if (unlikely(infoa->socket_id != infob->socket_id))
5067 return infoa->socket_id - infob->socket_id;
5068 else
5069 return infoa->die_id - infob->die_id;
5072}

static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
						      struct amdgpu_smuio_mcm_config_info *mcm_info)
{
	struct ras_err_node *err_node;

	err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
	if (err_node)
		return &err_node->err_info;

	err_node = amdgpu_ras_error_node_new();
	if (!err_node)
		return NULL;

	memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));

	err_data->err_list_count++;
	list_add_tail(&err_node->node, &err_data->err_node_list);
	list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);

	return &err_node->err_info;
}

int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
					struct amdgpu_smuio_mcm_config_info *mcm_info,
					u64 count)
{
	struct ras_err_info *err_info;

	if (!err_data || !mcm_info)
		return -EINVAL;

	if (!count)
		return 0;

	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
	if (!err_info)
		return -EINVAL;

	err_info->ue_count += count;
	err_data->ue_count += count;

	return 0;
}

int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
					struct amdgpu_smuio_mcm_config_info *mcm_info,
					u64 count)
{
	struct ras_err_info *err_info;

	if (!err_data || !mcm_info)
		return -EINVAL;

	if (!count)
		return 0;

	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
	if (!err_info)
		return -EINVAL;

	err_info->ce_count += count;
	err_data->ce_count += count;

	return 0;
}

int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
					struct amdgpu_smuio_mcm_config_info *mcm_info,
					u64 count)
{
	struct ras_err_info *err_info;

	if (!err_data || !mcm_info)
		return -EINVAL;

	if (!count)
		return 0;

	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
	if (!err_info)
		return -EINVAL;

	err_info->de_count += count;
	err_data->de_count += count;

	return 0;
}

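/*
 * Illustrative sketch (not called by the driver): the typical lifecycle of a
 * ras_err_data when a block attributes error counts to a (socket, die) pair.
 * The socket/die IDs and the counts below are made-up values; real callers
 * take them from hardware queries.
 */
static int __maybe_unused example_collect_err_counts(void)
{
	struct ras_err_data err_data;
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = 0,	/* hypothetical */
		.die_id = 1,	/* hypothetical */
	};
	int ret;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	/* attribute 2 uncorrectable and 5 correctable errors to socket 0, die 1 */
	amdgpu_ras_error_statistic_ue_count(&err_data, &mcm_info, 2);
	amdgpu_ras_error_statistic_ce_count(&err_data, &mcm_info, 5);

	/* consume err_data.ue_count / err_data.ce_count here ... */

	amdgpu_ras_error_data_fini(&err_data);

	return 0;
}
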
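/*
 * Boot status is reported through two MP0-to-host (C2P) message registers
 * read over SMN: C2PMSG_92 holds the firmware boot status word that is
 * also polled below, and C2PMSG_126 holds the encoded boot error with the
 * socket/AID/HBM IDs and one flag bit per failure type.
 */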
#define mmMP0_SMN_C2PMSG_92	0x1609C
#define mmMP0_SMN_C2PMSG_126	0x160BE
static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
						 u32 instance)
{
	u32 socket_id, aid_id, hbm_id;
	u32 fw_status;
	u32 boot_error;
	u64 reg_addr;

	/* The SMN addressing pattern on other SOCs may differ from the
	 * aqua_vanjaram one. Revisit this code if the pattern changes, and
	 * replace the aqua_vanjaram-specific helper with a more generic one.
	 */
	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
		   aqua_vanjaram_encode_ext_smn_addressing(instance);
	fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);

	reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
		   aqua_vanjaram_encode_ext_smn_addressing(instance);
	boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);

	socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
	aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
	hbm_id = (AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error) == 1) ? 0 : 1;

	if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n",
			 socket_id, aid_id, hbm_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n",
			 socket_id, aid_id, hbm_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
			 socket_id, aid_id, hbm_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_GENERIC(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, Boot Controller Generic Error\n",
			 socket_id, aid_id, fw_status);
}

static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
					   u32 instance)
{
	u64 reg_addr;
	u32 reg_data;
	int retry_loop;

	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
		   aqua_vanjaram_encode_ext_smn_addressing(instance);

	for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
		reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
		if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
			return false;
		msleep(1);
	}

	return true;
}

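/*
 * Check each of the @num_instances instances and print its decoded
 * boot-time errors only if it failed to reach the steady boot status
 * within the polling limit; healthy instances are skipped quietly.
 */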
void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
{
	u32 i;

	for (i = 0; i < num_instances; i++) {
		if (amdgpu_ras_boot_error_detected(adev, i))
			amdgpu_ras_boot_time_error_reporting(adev, i);
	}
}

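/*
 * Reserve a single bad VRAM page with the VRAM manager so it can no longer
 * be handed out by the allocator. The status query returns -ENOENT only
 * when the page is not tracked yet, so a page that is already reserved is
 * left untouched and its current status is returned to the caller.
 */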
int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
	int ret = 0;

	mutex_lock(&con->page_rsv_lock);
	ret = amdgpu_vram_mgr_query_page_status(mgr, start);
	if (ret == -ENOENT)
		ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
	mutex_unlock(&con->page_rsv_lock);

	return ret;
}

void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
				const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (RAS_EVENT_ID_IS_VALID(event_id))
		dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
	else
		dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);

	va_end(args);
}

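/*
 * Illustrative call (hypothetical message and count): with a valid event id
 * the line is prefixed with "{<id>}" so every message that belongs to the
 * same RAS event can be correlated in the kernel log; with an invalid id
 * the message is printed without a prefix.
 *
 *	amdgpu_ras_event_log_print(adev, event_id,
 *				   "%d correctable hardware errors detected\n",
 *				   ce_count);
 */
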
bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return false;

	return con->is_rma;
}