drm/amdgpu: Use ARRAY_SIZE to get array length
[linux-2.6-block.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_ras.c
c030f2e4 1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 *
23 */
24#include <linux/debugfs.h>
25#include <linux/list.h>
26#include <linux/module.h>
f867723b 27#include <linux/uaccess.h>
7c6e68c7
AG
28#include <linux/reboot.h>
29#include <linux/syscalls.h>
05adfd80 30#include <linux/pm_runtime.h>
f867723b 31
c030f2e4 32#include "amdgpu.h"
33#include "amdgpu_ras.h"
b404ae82 34#include "amdgpu_atomfirmware.h"
19744f5f 35#include "amdgpu_xgmi.h"
4e644fff 36#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
f50160cf 37#include "atom.h"
12b2cab7
MJ
38#ifdef CONFIG_X86_MCE_AMD
39#include <asm/mce.h>
c030f2e4 40
12b2cab7
MJ
41static bool notifier_registered;
42#endif
eb0c3cd4
GC
43static const char *RAS_FS_NAME = "ras";
44
c030f2e4 45const char *ras_error_string[] = {
46 "none",
47 "parity",
48 "single_correctable",
49 "multi_uncorrectable",
50 "poison",
51};
52
53const char *ras_block_string[] = {
54 "umc",
55 "sdma",
56 "gfx",
57 "mmhub",
58 "athub",
59 "pcie_bif",
60 "hdp",
61 "xgmi_wafl",
62 "df",
63 "smn",
64 "sem",
65 "mp0",
66 "mp1",
67 "fuse",
640ae42e 68 "mca",
c030f2e4 69};
70
640ae42e
JC
71const char *ras_mca_block_string[] = {
72 "mca_mp0",
73 "mca_mp1",
74 "mca_mpio",
75 "mca_iohc",
76};
77
78const char *get_ras_block_str(struct ras_common_if *ras_block)
79{
80 if (!ras_block)
81 return "NULL";
82
83 if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
84 return "OUT OF RANGE";
85
86 if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
87 return ras_mca_block_string[ras_block->sub_block_index];
88
89 return ras_block_string[ras_block->block];
90}
91
954ea6aa 92#define ras_block_str(_BLOCK_) \
93 (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
8b0fb0e9 94
c030f2e4 95#define ras_err_str(i) (ras_error_string[ffs(i)])
c030f2e4 96
108c6a63 97#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
98
7cdc2ee3
TZ
99/* inject address is 52 bits */
100#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
101
e4e6a589
LT
102/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
103#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
c84d4670 104
52dd95f2
GC
105enum amdgpu_ras_retire_page_reservation {
106 AMDGPU_RAS_RETIRE_PAGE_RESERVED,
107 AMDGPU_RAS_RETIRE_PAGE_PENDING,
108 AMDGPU_RAS_RETIRE_PAGE_FAULT,
109};
7c6e68c7
AG
110
111atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
112
676deb38
DL
113static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
114 uint64_t addr);
6e4be987
TZ
115static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
116 uint64_t addr);
12b2cab7 117#ifdef CONFIG_X86_MCE_AMD
91a1a52d
MJ
118static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
119struct mce_notifier_adev_list {
120 struct amdgpu_device *devs[MAX_GPU_INSTANCE];
121 int num_gpu;
122};
123static struct mce_notifier_adev_list mce_adev_list;
12b2cab7 124#endif
6e4be987 125
61380faa
JC
126void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
127{
a9d82d2f 128 if (adev && amdgpu_ras_get_context(adev))
61380faa
JC
129 amdgpu_ras_get_context(adev)->error_query_ready = ready;
130}
131
f3167919 132static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
61380faa 133{
a9d82d2f 134 if (adev && amdgpu_ras_get_context(adev))
61380faa
JC
135 return amdgpu_ras_get_context(adev)->error_query_ready;
136
137 return false;
138}
139
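/*
 * Reached from the debugfs "retire_page" command (op 3 in
 * amdgpu_ras_debugfs_ctrl_parse_data() below). An illustrative shell
 * invocation (the card index and address are assumptions):
 *
 *	echo "retire_page 0x1000" > /sys/kernel/debug/dri/0/ras/ras_ctrl
 */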
cbb8f989
JC
140static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
141{
142 struct ras_err_data err_data = {0, 0, 0, NULL};
143 struct eeprom_table_record err_rec;
144
145 if ((address >= adev->gmc.mc_vram_size) ||
146 (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
147 dev_warn(adev->dev,
148 "RAS WARN: input address 0x%llx is invalid.\n",
149 address);
150 return -EINVAL;
151 }
152
153 if (amdgpu_ras_check_bad_page(adev, address)) {
154 dev_warn(adev->dev,
80b0cd0f 155 "RAS WARN: 0x%llx has already been marked as bad page!\n",
cbb8f989
JC
156 address);
157 return 0;
158 }
159
160 memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
161
162 err_rec.address = address;
163 err_rec.retired_page = address >> AMDGPU_GPU_PAGE_SHIFT;
164 err_rec.ts = (uint64_t)ktime_get_real_seconds();
165 err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
166
167 err_data.err_addr = &err_rec;
168 err_data.err_addr_cnt = 1;
169
170 if (amdgpu_bad_page_threshold != 0) {
171 amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
172 err_data.err_addr_cnt);
173 amdgpu_ras_save_bad_pages(adev);
174 }
175
176 dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
177 dev_warn(adev->dev, "Clear EEPROM:\n");
178 dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
179
180 return 0;
181}
182
c030f2e4 183static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
184 size_t size, loff_t *pos)
185{
186 struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
187 struct ras_query_if info = {
188 .head = obj->head,
189 };
190 ssize_t s;
191 char val[128];
192
761d86d3 193 if (amdgpu_ras_query_error_status(obj->adev, &info))
c030f2e4 194 return -EINVAL;
195
196 s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
197 "ue", info.ue_count,
198 "ce", info.ce_count);
199 if (*pos >= s)
200 return 0;
201
202 s -= *pos;
203 s = min_t(u64, s, size);
204
205
206 if (copy_to_user(buf, &val[*pos], s))
207 return -EINVAL;
208
209 *pos += s;
210
211 return s;
212}
213
c030f2e4 214static const struct file_operations amdgpu_ras_debugfs_ops = {
215 .owner = THIS_MODULE,
216 .read = amdgpu_ras_debugfs_read,
190211ab 217 .write = NULL,
c030f2e4 218 .llseek = default_llseek
219};
220
96ebb307 221static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
222{
223 int i;
224
225 for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
226 *block_id = i;
640ae42e 227 if (strcmp(name, ras_block_string[i]) == 0)
96ebb307 228 return 0;
229 }
230 return -EINVAL;
231}
232
233static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
234 const char __user *buf, size_t size,
235 loff_t *pos, struct ras_debug_if *data)
236{
237 ssize_t s = min_t(u64, 64, size);
238 char str[65];
239 char block_name[33];
240 char err[9] = "ue";
241 int op = -1;
242 int block_id;
44494f96 243 uint32_t sub_block;
96ebb307 244 u64 address, value;
245
246 if (*pos)
247 return -EINVAL;
248 *pos = size;
249
250 memset(str, 0, sizeof(str));
251 memset(data, 0, sizeof(*data));
252
253 if (copy_from_user(str, buf, s))
254 return -EINVAL;
255
256 if (sscanf(str, "disable %32s", block_name) == 1)
257 op = 0;
258 else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
259 op = 1;
260 else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
261 op = 2;
6df23f4c 262 else if (strstr(str, "retire_page") != NULL)
cbb8f989 263 op = 3;
b076296b 264 else if (str[0] && str[1] && str[2] && str[3])
96ebb307 265 /* ascii string, but commands are not matched. */
266 return -EINVAL;
267
268 if (op != -1) {
cbb8f989 269 if (op == 3) {
546aa546
LT
270 if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
271 sscanf(str, "%*s %llu", &address) != 1)
6cb7a1d4 272 return -EINVAL;
cbb8f989
JC
273
274 data->op = op;
275 data->inject.address = address;
276
277 return 0;
278 }
279
96ebb307 280 if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
281 return -EINVAL;
282
283 data->head.block = block_id;
e1063493
TZ
284 /* only ue and ce errors are supported */
285 if (!memcmp("ue", err, 2))
286 data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
287 else if (!memcmp("ce", err, 2))
288 data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
289 else
290 return -EINVAL;
291
96ebb307 292 data->op = op;
293
294 if (op == 2) {
546aa546
LT
295 if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
296 &sub_block, &address, &value) != 3 &&
297 sscanf(str, "%*s %*s %*s %u %llu %llu",
6cb7a1d4
LT
298 &sub_block, &address, &value) != 3)
299 return -EINVAL;
44494f96 300 data->head.sub_block_index = sub_block;
96ebb307 301 data->inject.address = address;
302 data->inject.value = value;
303 }
304 } else {
73aa8e1a 305 if (size < sizeof(*data))
96ebb307 306 return -EINVAL;
307
308 if (copy_from_user(data, buf, sizeof(*data)))
309 return -EINVAL;
310 }
311
312 return 0;
313}
7c6e68c7 314
74abc221
TSD
315/**
316 * DOC: AMDGPU RAS debugfs control interface
36ea1bd2 317 *
737c375b 318 * The control interface accepts struct ras_debug_if which has two members.
36ea1bd2 319 *
320 * First member: ras_debug_if::head or ras_debug_if::inject.
96ebb307 321 *
322 * head is used to indicate which IP block will be under control.
36ea1bd2 323 *
324 * head has four members, they are block, type, sub_block_index, name.
325 * block: which IP will be under control.
326 * type: what kind of error will be enabled/disabled/injected.
 327 * sub_block_index: some IPs have subcomponents, say, GFX, SDMA.
328 * name: the name of IP.
329 *
330 * inject has two more members than head, they are address, value.
331 * As their names indicate, inject operation will write the
332 * value to the address.
333 *
ef177d11 334 * The second member: struct ras_debug_if::op.
c688a06b 335 * It has three kinds of operations.
879e723d
AZ
336 *
337 * - 0: disable RAS on the block. Take ::head as its data.
338 * - 1: enable RAS on the block. Take ::head as its data.
339 * - 2: inject errors on the block. Take ::inject as its data.
36ea1bd2 340 *
96ebb307 341 * How to use the interface?
ef177d11 342 *
737c375b 343 * In a program
ef177d11 344 *
737c375b
LT
345 * Copy the struct ras_debug_if in your code and initialize it.
346 * Write the struct to the control interface.
ef177d11 347 *
737c375b 348 * From shell
96ebb307 349 *
879e723d
AZ
350 * .. code-block:: bash
351 *
737c375b
LT
352 * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
353 * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
354 * echo "inject <block> <error> <sub-block> <address> <value> > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
879e723d 355 *
737c375b 356 * Where N is the card which you want to affect.
ef177d11 357 *
737c375b
LT
358 * "disable" requires only the block.
359 * "enable" requires the block and error type.
360 * "inject" requires the block, error type, address, and value.
c666bbf0 361 *
737c375b 362 * The block is one of: umc, sdma, gfx, etc.
879e723d 363 * see ras_block_string[] for details
c666bbf0 364 *
737c375b
LT
365 * The error type is one of: ue, ce, where,
366 * ue is multi-uncorrectable
367 * ce is single-correctable
c666bbf0 368 *
737c375b
LT
 369 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
370 * The address and value are hexadecimal numbers, leading 0x is optional.
879e723d 371 *
737c375b 372 * For instance,
879e723d
AZ
373 *
374 * .. code-block:: bash
96ebb307 375 *
44494f96
TZ
376 * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
377 * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
96ebb307 378 * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
379 *
737c375b 380 * How to check the result of the operation?
36ea1bd2 381 *
737c375b 382 * To check disable/enable, see "ras" features at,
36ea1bd2 383 * /sys/class/drm/card[0/1/2...]/device/ras/features
384 *
737c375b
LT
385 * To check inject, see the corresponding error count at,
386 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
36ea1bd2 387 *
879e723d 388 * .. note::
ef177d11 389 * Operations are only allowed on blocks which are supported.
737c375b 390 * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
ef177d11
AD
391 * to see which blocks support RAS on a particular asic.
392 *
36ea1bd2 393 */
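/*
 * A minimal user-space sketch of the "In a program" path documented
 * above. It assumes struct ras_debug_if is copied verbatim from
 * amdgpu_ras.h into the test program and that card 0 is the target;
 * both are assumptions, not guarantees made by this file.
 *
 *	struct ras_debug_if data = { 0 };
 *	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *
 *	data.op = 1;					/* 1: enable RAS on the block */
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *	if (fd < 0 || write(fd, &data, sizeof(data)) != sizeof(data))
 *		perror("ras_ctrl");
 *	if (fd >= 0)
 *		close(fd);
 */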
cf696091
LT
394static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
395 const char __user *buf,
396 size_t size, loff_t *pos)
36ea1bd2 397{
398 struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
399 struct ras_debug_if data;
400 int ret = 0;
401
61380faa 402 if (!amdgpu_ras_get_error_query_ready(adev)) {
6952e99c
GC
403 dev_warn(adev->dev, "RAS WARN: error injection "
404 "currently inaccessible\n");
43c4d576
JC
405 return size;
406 }
407
96ebb307 408 ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
409 if (ret)
cf696091 410 return ret;
36ea1bd2 411
80b0cd0f 412 if (data.op == 3) {
cbb8f989 413 ret = amdgpu_reserve_page_direct(adev, data.inject.address);
80b0cd0f 414 if (!ret)
cbb8f989
JC
415 return size;
416 else
417 return ret;
418 }
419
36ea1bd2 420 if (!amdgpu_ras_is_supported(adev, data.head.block))
421 return -EINVAL;
422
423 switch (data.op) {
424 case 0:
425 ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
426 break;
427 case 1:
428 ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
429 break;
430 case 2:
7cdc2ee3
TZ
431 if ((data.inject.address >= adev->gmc.mc_vram_size) ||
432 (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
b0d4783a
GC
433 dev_warn(adev->dev, "RAS WARN: input address "
434 "0x%llx is invalid.",
435 data.inject.address);
7cdc2ee3
TZ
436 ret = -EINVAL;
437 break;
438 }
439
6e4be987
TZ
440 /* umc ce/ue error injection for a bad page is not allowed */
441 if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
442 amdgpu_ras_check_bad_page(adev, data.inject.address)) {
c65b0805
LT
443 dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
444 "already been marked as bad!\n",
445 data.inject.address);
6e4be987
TZ
446 break;
447 }
448
7cdc2ee3 449 /* data.inject.address is an offset, not an absolute gpu address */
36ea1bd2 450 ret = amdgpu_ras_error_inject(adev, &data.inject);
451 break;
96ebb307 452 default:
453 ret = -EINVAL;
454 break;
374bf7bd 455 }
36ea1bd2 456
457 if (ret)
458 return -EINVAL;
459
460 return size;
461}
462
084fe13b
AG
463/**
464 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
465 *
f77c7109 466 * Some boards contain an EEPROM which is used to persistently store a list of
ef177d11 467 * bad pages which have experienced ECC errors in vram. This interface provides
f77c7109
AD
468 * a way to reset the EEPROM, e.g., after testing error injection.
469 *
470 * Usage:
471 *
472 * .. code-block:: bash
473 *
474 * echo 1 > ../ras/ras_eeprom_reset
475 *
476 * will reset EEPROM table to 0 entries.
477 *
084fe13b 478 */
cf696091
LT
479static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
480 const char __user *buf,
481 size_t size, loff_t *pos)
084fe13b 482{
bf0b91b7
GC
483 struct amdgpu_device *adev =
484 (struct amdgpu_device *)file_inode(f)->i_private;
084fe13b
AG
485 int ret;
486
bf0b91b7 487 ret = amdgpu_ras_eeprom_reset_table(
cf696091 488 &(amdgpu_ras_get_context(adev)->eeprom_control));
084fe13b 489
63d4c081 490 if (!ret) {
cf696091
LT
491 /* Something was written to EEPROM.
492 */
bf0b91b7
GC
493 amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
494 return size;
495 } else {
cf696091 496 return ret;
bf0b91b7 497 }
084fe13b
AG
498}
499
36ea1bd2 500static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
501 .owner = THIS_MODULE,
502 .read = NULL,
503 .write = amdgpu_ras_debugfs_ctrl_write,
504 .llseek = default_llseek
505};
506
084fe13b
AG
507static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
508 .owner = THIS_MODULE,
509 .read = NULL,
510 .write = amdgpu_ras_debugfs_eeprom_write,
511 .llseek = default_llseek
512};
513
f77c7109
AD
514/**
515 * DOC: AMDGPU RAS sysfs Error Count Interface
516 *
ef177d11 517 * It allows the user to read the error count for each IP block on the gpu through
f77c7109
AD
518 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
519 *
 520 * It outputs multiple lines which report the uncorrected (ue) and corrected
521 * (ce) error counts.
522 *
523 * The format of one line is below,
524 *
525 * [ce|ue]: count
526 *
527 * Example:
528 *
529 * .. code-block:: bash
530 *
531 * ue: 0
532 * ce: 1
533 *
534 */
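/*
 * A small sketch of consuming this interface from user space, matching
 * the "ue:"/"ce:" lines emitted below (block name and card index are
 * illustrative assumptions):
 *
 *	unsigned long ue, ce;
 *	FILE *f = fopen("/sys/class/drm/card0/device/ras/umc_err_count", "r");
 *
 *	if (f && fscanf(f, "ue: %lu ce: %lu", &ue, &ce) == 2)
 *		printf("umc: ue=%lu ce=%lu\n", ue, ce);
 *	if (f)
 *		fclose(f);
 */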
c030f2e4 535static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
536 struct device_attribute *attr, char *buf)
537{
538 struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
539 struct ras_query_if info = {
540 .head = obj->head,
541 };
542
61380faa 543 if (!amdgpu_ras_get_error_query_ready(obj->adev))
36000c7a 544 return sysfs_emit(buf, "Query currently inaccessible\n");
43c4d576 545
761d86d3 546 if (amdgpu_ras_query_error_status(obj->adev, &info))
c030f2e4 547 return -EINVAL;
548
1f0d8e37
MJ
549 if (obj->adev->asic_type == CHIP_ALDEBARAN) {
550 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
551 DRM_WARN("Failed to reset error counter and error status");
552 }
553
36000c7a
TT
554 return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
555 "ce", info.ce_count);
c030f2e4 556}
557
558/* obj begin */
559
560#define get_obj(obj) do { (obj)->use++; } while (0)
561#define alive_obj(obj) ((obj)->use)
562
563static inline void put_obj(struct ras_manager *obj)
564{
f0872686 565 if (obj && (--obj->use == 0))
c030f2e4 566 list_del(&obj->node);
f0872686 567 if (obj && (obj->use < 0))
640ae42e 568 DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
c030f2e4 569}
570
571/* make one obj and return it. */
572static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
573 struct ras_common_if *head)
574{
575 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
576 struct ras_manager *obj;
577
8ab0d6f0 578 if (!adev->ras_enabled || !con)
c030f2e4 579 return NULL;
580
581 if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
582 return NULL;
583
640ae42e
JC
584 if (head->block == AMDGPU_RAS_BLOCK__MCA) {
585 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
586 return NULL;
587
588 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
589 } else
590 obj = &con->objs[head->block];
591
c030f2e4 592 /* already exist. return obj? */
593 if (alive_obj(obj))
594 return NULL;
595
596 obj->head = *head;
597 obj->adev = adev;
598 list_add(&obj->node, &con->head);
599 get_obj(obj);
600
601 return obj;
602}
603
604/* return an obj equal to head, or the first when head is NULL */
f2a79be1 605struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
c030f2e4 606 struct ras_common_if *head)
607{
608 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
609 struct ras_manager *obj;
610 int i;
611
8ab0d6f0 612 if (!adev->ras_enabled || !con)
c030f2e4 613 return NULL;
614
615 if (head) {
616 if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
617 return NULL;
618
640ae42e
JC
619 if (head->block == AMDGPU_RAS_BLOCK__MCA) {
620 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
621 return NULL;
622
623 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
624 } else
625 obj = &con->objs[head->block];
c030f2e4 626
640ae42e 627 if (alive_obj(obj))
c030f2e4 628 return obj;
c030f2e4 629 } else {
640ae42e 630 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
c030f2e4 631 obj = &con->objs[i];
640ae42e 632 if (alive_obj(obj))
c030f2e4 633 return obj;
c030f2e4 634 }
635 }
636
637 return NULL;
638}
639/* obj end */
640
641/* feature ctl begin */
642static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
e509965e 643 struct ras_common_if *head)
c030f2e4 644{
8ab0d6f0 645 return adev->ras_hw_enabled & BIT(head->block);
c030f2e4 646}
647
648static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
649 struct ras_common_if *head)
650{
651 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
652
653 return con->features & BIT(head->block);
654}
655
656/*
657 * if obj is not created, then create one.
658 * set feature enable flag.
659 */
660static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
661 struct ras_common_if *head, int enable)
662{
663 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
664 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
665
5caf466a 666 /* If hardware does not support ras, then do not create obj.
 667 * But if hardware supports ras, we can create the obj.
 668 * The ras framework checks con->hw_supported to see if it needs to do
 669 * the corresponding initialization.
 670 * The IP checks con->support to see if it needs to disable ras.
671 */
c030f2e4 672 if (!amdgpu_ras_is_feature_allowed(adev, head))
673 return 0;
c030f2e4 674
675 if (enable) {
676 if (!obj) {
677 obj = amdgpu_ras_create_obj(adev, head);
678 if (!obj)
679 return -EINVAL;
680 } else {
681 /* In case we create obj somewhere else */
682 get_obj(obj);
683 }
684 con->features |= BIT(head->block);
685 } else {
686 if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
19d0dfda 687 con->features &= ~BIT(head->block);
c030f2e4 688 put_obj(obj);
689 }
690 }
691
692 return 0;
693}
694
695/* wrapper of psp_ras_enable_features */
696int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
697 struct ras_common_if *head, bool enable)
698{
699 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
7fcffecf 700 union ta_ras_cmd_input *info;
c030f2e4 701 int ret;
702
703 if (!con)
704 return -EINVAL;
705
f3729f7b 706 info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
7fcffecf
AB
707 if (!info)
708 return -ENOMEM;
709
c030f2e4 710 if (!enable) {
7fcffecf 711 info->disable_features = (struct ta_ras_disable_features_input) {
828cfa29 712 .block_id = amdgpu_ras_block_to_ta(head->block),
713 .error_type = amdgpu_ras_error_to_ta(head->type),
c030f2e4 714 };
715 } else {
7fcffecf 716 info->enable_features = (struct ta_ras_enable_features_input) {
828cfa29 717 .block_id = amdgpu_ras_block_to_ta(head->block),
718 .error_type = amdgpu_ras_error_to_ta(head->type),
c030f2e4 719 };
720 }
721
722 /* Do not enable if it is not allowed. */
723 WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
c030f2e4 724
bff77e86 725 if (!amdgpu_ras_intr_triggered()) {
7fcffecf 726 ret = psp_ras_enable_features(&adev->psp, info, enable);
bff77e86 727 if (ret) {
e4348849 728 dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
011907fd 729 enable ? "enable":"disable",
640ae42e 730 get_ras_block_str(head),
e4348849 731 amdgpu_ras_is_poison_mode_supported(adev), ret);
7fcffecf 732 goto out;
bff77e86 733 }
c030f2e4 734 }
735
736 /* setup the obj */
737 __amdgpu_ras_feature_enable(adev, head, enable);
7fcffecf
AB
738 ret = 0;
739out:
740 kfree(info);
741 return ret;
c030f2e4 742}
743
77de502b 744/* Only used in device probe stage and called only once. */
745int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
746 struct ras_common_if *head, bool enable)
747{
748 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
749 int ret;
750
751 if (!con)
752 return -EINVAL;
753
754 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
7af23ebe 755 if (enable) {
 756 /* There is no harm in issuing a ras TA cmd regardless of
 757 * the current ras state.
 758 * If current state == target state, it will do nothing.
 759 * But sometimes it requests the driver to reset and repost
 760 * with error code -EAGAIN.
761 */
762 ret = amdgpu_ras_feature_enable(adev, head, 1);
763 /* With old ras TA, we might fail to enable ras.
764 * Log it and just setup the object.
765 * TODO need remove this WA in the future.
766 */
767 if (ret == -EINVAL) {
768 ret = __amdgpu_ras_feature_enable(adev, head, 1);
769 if (!ret)
6952e99c
GC
770 dev_info(adev->dev,
771 "RAS INFO: %s setup object\n",
640ae42e 772 get_ras_block_str(head));
7af23ebe 773 }
774 } else {
775 /* setup the object then issue a ras TA disable cmd.*/
776 ret = __amdgpu_ras_feature_enable(adev, head, 1);
777 if (ret)
778 return ret;
77de502b 779
970fd197
SY
 780 /* gfx block ras disable cmd must be sent to ras-ta */
781 if (head->block == AMDGPU_RAS_BLOCK__GFX)
782 con->features |= BIT(head->block);
783
77de502b 784 ret = amdgpu_ras_feature_enable(adev, head, 0);
19d0dfda
SY
785
786 /* clean gfx block ras features flag */
8ab0d6f0 787 if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
19d0dfda 788 con->features &= ~BIT(head->block);
7af23ebe 789 }
77de502b 790 } else
791 ret = amdgpu_ras_feature_enable(adev, head, enable);
792
793 return ret;
794}
795
c030f2e4 796static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
797 bool bypass)
798{
799 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
800 struct ras_manager *obj, *tmp;
801
802 list_for_each_entry_safe(obj, tmp, &con->head, node) {
803 /* bypass psp.
804 * aka just release the obj and corresponding flags
805 */
806 if (bypass) {
807 if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
808 break;
809 } else {
810 if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
811 break;
812 }
289d513b 813 }
c030f2e4 814
815 return con->features;
816}
817
818static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
819 bool bypass)
820{
821 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
c030f2e4 822 int i;
640ae42e 823 const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
c030f2e4 824
640ae42e 825 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
c030f2e4 826 struct ras_common_if head = {
827 .block = i,
191051a1 828 .type = default_ras_type,
c030f2e4 829 .sub_block_index = 0,
830 };
640ae42e
JC
831
832 if (i == AMDGPU_RAS_BLOCK__MCA)
833 continue;
834
835 if (bypass) {
836 /*
837 * bypass psp. vbios enable ras for us.
838 * so just create the obj
839 */
840 if (__amdgpu_ras_feature_enable(adev, &head, 1))
841 break;
842 } else {
843 if (amdgpu_ras_feature_enable(adev, &head, 1))
844 break;
845 }
846 }
847
848 for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
849 struct ras_common_if head = {
850 .block = AMDGPU_RAS_BLOCK__MCA,
851 .type = default_ras_type,
852 .sub_block_index = i,
853 };
854
c030f2e4 855 if (bypass) {
856 /*
857 * bypass psp. vbios enable ras for us.
858 * so just create the obj
859 */
860 if (__amdgpu_ras_feature_enable(adev, &head, 1))
861 break;
862 } else {
863 if (amdgpu_ras_feature_enable(adev, &head, 1))
864 break;
865 }
289d513b 866 }
c030f2e4 867
868 return con->features;
869}
870/* feature ctl end */
871
6492e1b0 872int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object* block_obj, enum amdgpu_ras_block block)
873{
874 if(!block_obj)
875 return -EINVAL;
876
877 if (block_obj->block == block)
878 return 0;
879
880 return -EINVAL;
881}
882
883static struct amdgpu_ras_block_object* amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
884 enum amdgpu_ras_block block, uint32_t sub_block_index)
885{
886 struct amdgpu_ras_block_object *obj, *tmp;
887
888 if (block >= AMDGPU_RAS_BLOCK__LAST)
889 return NULL;
890
891 if (!amdgpu_ras_is_supported(adev, block))
892 return NULL;
893
894 list_for_each_entry_safe(obj, tmp, &adev->ras_list, node) {
895 if (obj->ras_block_match) {
896 if (obj->ras_block_match(obj, block, sub_block_index) == 0)
897 return obj;
898 } else {
899 if (amdgpu_ras_block_match_default(obj, block) == 0)
900 return obj;
901 }
902 }
903
904 return NULL;
905}
640ae42e 906
fdcb279d
SY
907static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
908{
909 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
910 int ret = 0;
911
912 /*
 913 * choose the right query method according to
 914 * whether the smu supports querying error information
915 */
bc143d8b 916 ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
fdcb279d 917 if (ret == -EOPNOTSUPP) {
efe17d5a 918 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
919 adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
920 adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
fdcb279d
SY
921
922 /* umc query_ras_error_address is also responsible for clearing
923 * error status
924 */
efe17d5a 925 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
926 adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
927 adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
fdcb279d 928 } else if (!ret) {
efe17d5a 929 if (adev->umc.ras &&
930 adev->umc.ras->ecc_info_query_ras_error_count)
931 adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
fdcb279d 932
efe17d5a 933 if (adev->umc.ras &&
934 adev->umc.ras->ecc_info_query_ras_error_address)
935 adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
fdcb279d
SY
936 }
937}
938
c030f2e4 939/* query/inject/cure begin */
761d86d3 940int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
4d9f771e 941 struct ras_query_if *info)
c030f2e4 942{
8b0fb0e9 943 struct amdgpu_ras_block_object* block_obj = NULL;
c030f2e4 944 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
6f102dba 945 struct ras_err_data err_data = {0, 0, 0, NULL};
c030f2e4 946
947 if (!obj)
948 return -EINVAL;
c030f2e4 949
7389a5b8 950 if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
fdcb279d 951 amdgpu_ras_get_ecc_info(adev, &err_data);
7389a5b8 952 } else {
953 block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
8b0fb0e9 954 if (!block_obj || !block_obj->hw_ops) {
955 dev_info(adev->dev, "%s doesn't config ras function \n",
7389a5b8 956 get_ras_block_str(&info->head));
8b0fb0e9 957 return -EINVAL;
958 }
761d86d3 959
6c245386 960 if (block_obj->hw_ops->query_ras_error_count)
961 block_obj->hw_ops->query_ras_error_count(adev, &err_data);
7389a5b8 962
963 if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
964 (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
965 (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
966 if (block_obj->hw_ops->query_ras_error_status)
967 block_obj->hw_ops->query_ras_error_status(adev);
968 }
939e2258 969 }
05a58345
TZ
970
971 obj->err_data.ue_count += err_data.ue_count;
972 obj->err_data.ce_count += err_data.ce_count;
973
c030f2e4 974 info->ue_count = obj->err_data.ue_count;
975 info->ce_count = obj->err_data.ce_count;
976
7c6e68c7 977 if (err_data.ce_count) {
a30f1286
HZ
978 if (adev->smuio.funcs &&
979 adev->smuio.funcs->get_socket_id &&
980 adev->smuio.funcs->get_die_id) {
981 dev_info(adev->dev, "socket: %d, die: %d "
982 "%ld correctable hardware errors "
6952e99c
GC
983 "detected in %s block, no user "
984 "action is needed.\n",
a30f1286
HZ
985 adev->smuio.funcs->get_socket_id(adev),
986 adev->smuio.funcs->get_die_id(adev),
6952e99c 987 obj->err_data.ce_count,
640ae42e 988 get_ras_block_str(&info->head));
a30f1286
HZ
989 } else {
990 dev_info(adev->dev, "%ld correctable hardware errors "
6952e99c
GC
991 "detected in %s block, no user "
992 "action is needed.\n",
993 obj->err_data.ce_count,
640ae42e 994 get_ras_block_str(&info->head));
a30f1286 995 }
7c6e68c7
AG
996 }
997 if (err_data.ue_count) {
a30f1286
HZ
998 if (adev->smuio.funcs &&
999 adev->smuio.funcs->get_socket_id &&
1000 adev->smuio.funcs->get_die_id) {
1001 dev_info(adev->dev, "socket: %d, die: %d "
1002 "%ld uncorrectable hardware errors "
6952e99c 1003 "detected in %s block\n",
a30f1286
HZ
1004 adev->smuio.funcs->get_socket_id(adev),
1005 adev->smuio.funcs->get_die_id(adev),
6952e99c 1006 obj->err_data.ue_count,
640ae42e 1007 get_ras_block_str(&info->head));
a30f1286
HZ
1008 } else {
1009 dev_info(adev->dev, "%ld uncorrectable hardware errors "
6952e99c
GC
1010 "detected in %s block\n",
1011 obj->err_data.ue_count,
640ae42e 1012 get_ras_block_str(&info->head));
a30f1286 1013 }
7c6e68c7 1014 }
05a58345 1015
eb601e61
JC
1016 if (!amdgpu_persistent_edc_harvesting_supported(adev))
1017 amdgpu_ras_reset_error_status(adev, info->head.block);
1018
c030f2e4 1019 return 0;
1020}
1021
761d86d3
DL
1022int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1023 enum amdgpu_ras_block block)
1024{
8b0fb0e9 1025 struct amdgpu_ras_block_object* block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1026
761d86d3
DL
1027 if (!amdgpu_ras_is_supported(adev, block))
1028 return -EINVAL;
1029
7389a5b8 1030 if (!block_obj || !block_obj->hw_ops) {
1031 dev_info(adev->dev, "%s doesn't config ras function \n", ras_block_str(block));
1032 return -EINVAL;
1033 }
8b0fb0e9 1034
7389a5b8 1035 if (block_obj->hw_ops->reset_ras_error_count)
1036 block_obj->hw_ops->reset_ras_error_count(adev);
761d86d3 1037
7389a5b8 1038 if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1039 (block == AMDGPU_RAS_BLOCK__MMHUB)) {
8b0fb0e9 1040 if (block_obj->hw_ops->reset_ras_error_status)
1041 block_obj->hw_ops->reset_ras_error_status(adev);
761d86d3
DL
1042 }
1043
1044 return 0;
1045}
1046
c030f2e4 1047/* wrapper of psp_ras_trigger_error */
1048int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1049 struct ras_inject_if *info)
1050{
1051 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1052 struct ta_ras_trigger_error_input block_info = {
828cfa29 1053 .block_id = amdgpu_ras_block_to_ta(info->head.block),
1054 .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
c030f2e4 1055 .sub_block_index = info->head.sub_block_index,
1056 .address = info->address,
1057 .value = info->value,
1058 };
ab3b9de6
YL
1059 int ret = -EINVAL;
1060 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1061 info->head.block,
1062 info->head.sub_block_index);
c030f2e4 1063
1064 if (!obj)
1065 return -EINVAL;
1066
22d4ba53 1067 if (!block_obj || !block_obj->hw_ops) {
1068 dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head));
1069 return -EINVAL;
1070 }
1071
a6c44d25
JC
1072 /* Calculate XGMI relative offset */
1073 if (adev->gmc.xgmi.num_physical_nodes > 1) {
19744f5f
HZ
1074 block_info.address =
1075 amdgpu_xgmi_get_relative_phy_addr(adev,
1076 block_info.address);
a6c44d25
JC
1077 }
1078
22d4ba53 1079 if (info->head.block == AMDGPU_RAS_BLOCK__GFX) {
8b0fb0e9 1080 if (block_obj->hw_ops->ras_error_inject)
1081 ret = block_obj->hw_ops->ras_error_inject(adev, info);
22d4ba53 1082 } else {
 1083 /* If a special ras_error_inject is defined (e.g. xgmi), use it */
1084 if (block_obj->hw_ops->ras_error_inject)
1085 ret = block_obj->hw_ops->ras_error_inject(adev, &block_info);
 1086 else /* If .ras_error_inject is not defined, use the default injection path */
1087 ret = psp_ras_trigger_error(&adev->psp, &block_info);
a5dd40ca
HZ
1088 }
1089
011907fd
DL
1090 if (ret)
1091 dev_err(adev->dev, "ras inject %s failed %d\n",
640ae42e 1092 get_ras_block_str(&info->head), ret);
c030f2e4 1093
1094 return ret;
1095}
1096
4d9f771e
LT
1097/**
1098 * amdgpu_ras_query_error_count -- Get error counts of all IPs
bbe04dec
IB
1099 * @adev: pointer to AMD GPU device
1100 * @ce_count: pointer to an integer to be set to the count of correctible errors.
1101 * @ue_count: pointer to an integer to be set to the count of uncorrectible
4d9f771e
LT
1102 * errors.
1103 *
1104 * If set, @ce_count or @ue_count, count and return the corresponding
1105 * error counts in those integer pointers. Return 0 if the device
1106 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
1107 */
1108int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1109 unsigned long *ce_count,
1110 unsigned long *ue_count)
c030f2e4 1111{
1112 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1113 struct ras_manager *obj;
a46751fb 1114 unsigned long ce, ue;
c030f2e4 1115
8ab0d6f0 1116 if (!adev->ras_enabled || !con)
4d9f771e
LT
1117 return -EOPNOTSUPP;
1118
1119 /* Don't count since no reporting.
1120 */
1121 if (!ce_count && !ue_count)
1122 return 0;
c030f2e4 1123
a46751fb
LT
1124 ce = 0;
1125 ue = 0;
c030f2e4 1126 list_for_each_entry(obj, &con->head, node) {
1127 struct ras_query_if info = {
1128 .head = obj->head,
1129 };
4d9f771e 1130 int res;
c030f2e4 1131
4d9f771e
LT
1132 res = amdgpu_ras_query_error_status(adev, &info);
1133 if (res)
1134 return res;
c030f2e4 1135
a46751fb
LT
1136 ce += info.ce_count;
1137 ue += info.ue_count;
c030f2e4 1138 }
1139
a46751fb
LT
1140 if (ce_count)
1141 *ce_count = ce;
1142
1143 if (ue_count)
1144 *ue_count = ue;
4d9f771e
LT
1145
1146 return 0;
c030f2e4 1147}
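/*
 * Illustrative caller sketch for the helper above (the log message is
 * an assumption, not taken from this file):
 *
 *	unsigned long ce_count = 0, ue_count = 0;
 *
 *	if (!amdgpu_ras_query_error_count(adev, &ce_count, &ue_count))
 *		dev_info(adev->dev, "RAS totals: ce %lu, ue %lu\n",
 *			 ce_count, ue_count);
 */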
1148/* query/inject/cure end */
1149
1150
1151/* sysfs begin */
1152
466b1793 1153static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1154 struct ras_badpage **bps, unsigned int *count);
1155
1156static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1157{
1158 switch (flags) {
52dd95f2 1159 case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
466b1793 1160 return "R";
52dd95f2 1161 case AMDGPU_RAS_RETIRE_PAGE_PENDING:
466b1793 1162 return "P";
52dd95f2 1163 case AMDGPU_RAS_RETIRE_PAGE_FAULT:
466b1793 1164 default:
1165 return "F";
aec576f9 1166 }
466b1793 1167}
1168
f77c7109
AD
1169/**
1170 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
466b1793 1171 *
 1172 * It allows the user to read the bad pages of vram on the gpu through
1173 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1174 *
1175 * It outputs multiple lines, and each line stands for one gpu page.
1176 *
1177 * The format of one line is below,
1178 * gpu pfn : gpu page size : flags
1179 *
1180 * gpu pfn and gpu page size are printed in hex format.
 1181 * flags can be one of the below characters,
f77c7109 1182 *
466b1793 1183 * R: reserved, this gpu page is reserved and not able to use.
f77c7109 1184 *
466b1793 1185 * P: pending for reserve, this gpu page is marked as bad, will be reserved
f77c7109
AD
1186 * in next window of page_reserve.
1187 *
466b1793 1188 * F: unable to reserve. this gpu page can't be reserved due to some reasons.
1189 *
f77c7109
AD
1190 * Examples:
1191 *
1192 * .. code-block:: bash
1193 *
1194 * 0x00000001 : 0x00001000 : R
1195 * 0x00000002 : 0x00001000 : P
1196 *
466b1793 1197 */
1198
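/*
 * A minimal sketch of parsing one line of the gpu_vram_bad_pages file
 * documented above (the "line" buffer and its origin are assumptions):
 *
 *	unsigned int pfn, size;
 *	char flag;
 *
 *	if (sscanf(line, "0x%x : 0x%x : %c", &pfn, &size, &flag) == 3)
 *		printf("bad page 0x%x, size 0x%x, state %c\n", pfn, size, flag);
 */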
1199static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1200 struct kobject *kobj, struct bin_attribute *attr,
1201 char *buf, loff_t ppos, size_t count)
1202{
1203 struct amdgpu_ras *con =
1204 container_of(attr, struct amdgpu_ras, badpages_attr);
1205 struct amdgpu_device *adev = con->adev;
1206 const unsigned int element_size =
1207 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
d6ee400e
SA
1208 unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1209 unsigned int end = div64_ul(ppos + count - 1, element_size);
466b1793 1210 ssize_t s = 0;
1211 struct ras_badpage *bps = NULL;
1212 unsigned int bps_count = 0;
1213
1214 memset(buf, 0, count);
1215
1216 if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1217 return 0;
1218
1219 for (; start < end && start < bps_count; start++)
1220 s += scnprintf(&buf[s], element_size + 1,
1221 "0x%08x : 0x%08x : %1s\n",
1222 bps[start].bp,
1223 bps[start].size,
1224 amdgpu_ras_badpage_flags_str(bps[start].flags));
1225
1226 kfree(bps);
1227
1228 return s;
1229}
1230
c030f2e4 1231static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1232 struct device_attribute *attr, char *buf)
1233{
1234 struct amdgpu_ras *con =
1235 container_of(attr, struct amdgpu_ras, features_attr);
c030f2e4 1236
5212a3bd 1237 return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
c030f2e4 1238}
1239
f848159b
GC
1240static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1241{
1242 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1243
1244 sysfs_remove_file_from_group(&adev->dev->kobj,
1245 &con->badpages_attr.attr,
1246 RAS_FS_NAME);
1247}
1248
c030f2e4 1249static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
1250{
1251 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1252 struct attribute *attrs[] = {
1253 &con->features_attr.attr,
1254 NULL
1255 };
1256 struct attribute_group group = {
eb0c3cd4 1257 .name = RAS_FS_NAME,
c030f2e4 1258 .attrs = attrs,
1259 };
1260
1261 sysfs_remove_group(&adev->dev->kobj, &group);
1262
1263 return 0;
1264}
1265
1266int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1267 struct ras_fs_if *head)
1268{
1269 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1270
1271 if (!obj || obj->attr_inuse)
1272 return -EINVAL;
1273
1274 get_obj(obj);
1275
1276 memcpy(obj->fs_data.sysfs_name,
1277 head->sysfs_name,
1278 sizeof(obj->fs_data.sysfs_name));
1279
1280 obj->sysfs_attr = (struct device_attribute){
1281 .attr = {
1282 .name = obj->fs_data.sysfs_name,
1283 .mode = S_IRUGO,
1284 },
1285 .show = amdgpu_ras_sysfs_read,
1286 };
163def43 1287 sysfs_attr_init(&obj->sysfs_attr.attr);
c030f2e4 1288
1289 if (sysfs_add_file_to_group(&adev->dev->kobj,
1290 &obj->sysfs_attr.attr,
eb0c3cd4 1291 RAS_FS_NAME)) {
c030f2e4 1292 put_obj(obj);
1293 return -EINVAL;
1294 }
1295
1296 obj->attr_inuse = 1;
1297
1298 return 0;
1299}
1300
1301int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1302 struct ras_common_if *head)
1303{
1304 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1305
1306 if (!obj || !obj->attr_inuse)
1307 return -EINVAL;
1308
1309 sysfs_remove_file_from_group(&adev->dev->kobj,
1310 &obj->sysfs_attr.attr,
eb0c3cd4 1311 RAS_FS_NAME);
c030f2e4 1312 obj->attr_inuse = 0;
1313 put_obj(obj);
1314
1315 return 0;
1316}
1317
1318static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1319{
1320 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1321 struct ras_manager *obj, *tmp;
1322
1323 list_for_each_entry_safe(obj, tmp, &con->head, node) {
1324 amdgpu_ras_sysfs_remove(adev, &obj->head);
1325 }
1326
f848159b
GC
1327 if (amdgpu_bad_page_threshold != 0)
1328 amdgpu_ras_sysfs_remove_bad_page_node(adev);
1329
c030f2e4 1330 amdgpu_ras_sysfs_remove_feature_node(adev);
1331
1332 return 0;
1333}
1334/* sysfs end */
1335
ef177d11
AD
1336/**
1337 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1338 *
1339 * Normally when there is an uncorrectable error, the driver will reset
1340 * the GPU to recover. However, in the event of an unrecoverable error,
1341 * the driver provides an interface to reboot the system automatically
1342 * in that event.
1343 *
1344 * The following file in debugfs provides that interface:
1345 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1346 *
1347 * Usage:
1348 *
1349 * .. code-block:: bash
1350 *
1351 * echo true > .../ras/auto_reboot
1352 *
1353 */
c030f2e4 1354/* debugfs begin */
ea1b8c9b 1355static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
36ea1bd2 1356{
1357 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
ef0d7d20
LT
1358 struct drm_minor *minor = adev_to_drm(adev)->primary;
1359 struct dentry *dir;
36ea1bd2 1360
88293c03
ND
1361 dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1362 debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1363 &amdgpu_ras_debugfs_ctrl_ops);
1364 debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1365 &amdgpu_ras_debugfs_eeprom_ops);
7fb64071
LT
1366 debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1367 &con->bad_page_cnt_threshold);
ef0d7d20
LT
1368 debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1369 debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
c65b0805
LT
1370 debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1371 &amdgpu_ras_debugfs_eeprom_size_ops);
1372 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1373 S_IRUGO, dir, adev,
1374 &amdgpu_ras_debugfs_eeprom_table_ops);
1375 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
c688a06b
GC
1376
1377 /*
1378 * After one uncorrectable error happens, usually GPU recovery will
1379 * be scheduled. But due to the known problem in GPU recovery failing
1380 * to bring GPU back, below interface provides one direct way to
1381 * user to reboot system automatically in such case within
1382 * ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine
1383 * will never be called.
1384 */
88293c03 1385 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
66459e1d
GC
1386
1387 /*
1388 * User could set this not to clean up hardware's error count register
1389 * of RAS IPs during ras recovery.
1390 */
88293c03
ND
1391 debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1392 &con->disable_ras_err_cnt_harvest);
1393 return dir;
36ea1bd2 1394}
1395
cedf7884 1396static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
88293c03
ND
1397 struct ras_fs_if *head,
1398 struct dentry *dir)
c030f2e4 1399{
c030f2e4 1400 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
c030f2e4 1401
88293c03 1402 if (!obj || !dir)
450f30ea 1403 return;
c030f2e4 1404
1405 get_obj(obj);
1406
1407 memcpy(obj->fs_data.debugfs_name,
1408 head->debugfs_name,
1409 sizeof(obj->fs_data.debugfs_name));
1410
88293c03
ND
1411 debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1412 obj, &amdgpu_ras_debugfs_ops);
c030f2e4 1413}
1414
f9317014
TZ
1415void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1416{
1417 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
88293c03 1418 struct dentry *dir;
c1509f3f 1419 struct ras_manager *obj;
f9317014
TZ
1420 struct ras_fs_if fs_info;
1421
1422 /*
1423 * it won't be called in resume path, no need to check
1424 * suspend and gpu reset status
1425 */
cedf7884 1426 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
f9317014
TZ
1427 return;
1428
88293c03 1429 dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
f9317014 1430
c1509f3f 1431 list_for_each_entry(obj, &con->head, node) {
f9317014
TZ
1432 if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1433 (obj->attr_inuse == 1)) {
1434 sprintf(fs_info.debugfs_name, "%s_err_inject",
640ae42e 1435 get_ras_block_str(&obj->head));
f9317014 1436 fs_info.head = obj->head;
88293c03 1437 amdgpu_ras_debugfs_create(adev, &fs_info, dir);
f9317014
TZ
1438 }
1439 }
1440}
1441
c030f2e4 1442/* debugfs end */
1443
1444/* ras fs */
c3d4d45d
GC
1445static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
1446 amdgpu_ras_sysfs_badpages_read, NULL, 0);
1447static DEVICE_ATTR(features, S_IRUGO,
1448 amdgpu_ras_sysfs_features_read, NULL);
c030f2e4 1449static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
1450{
c3d4d45d
GC
1451 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1452 struct attribute_group group = {
1453 .name = RAS_FS_NAME,
1454 };
1455 struct attribute *attrs[] = {
1456 &con->features_attr.attr,
1457 NULL
1458 };
1459 struct bin_attribute *bin_attrs[] = {
1460 NULL,
1461 NULL,
1462 };
a069a9eb 1463 int r;
c030f2e4 1464
c3d4d45d
GC
1465 /* add features entry */
1466 con->features_attr = dev_attr_features;
1467 group.attrs = attrs;
1468 sysfs_attr_init(attrs[0]);
1469
1470 if (amdgpu_bad_page_threshold != 0) {
1471 /* add bad_page_features entry */
1472 bin_attr_gpu_vram_bad_pages.private = NULL;
1473 con->badpages_attr = bin_attr_gpu_vram_bad_pages;
1474 bin_attrs[0] = &con->badpages_attr;
1475 group.bin_attrs = bin_attrs;
1476 sysfs_bin_attr_init(bin_attrs[0]);
1477 }
1478
a069a9eb
AD
1479 r = sysfs_create_group(&adev->dev->kobj, &group);
1480 if (r)
1481 dev_err(adev->dev, "Failed to create RAS sysfs group!");
f848159b 1482
c030f2e4 1483 return 0;
1484}
1485
1486static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
1487{
88293c03
ND
1488 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1489 struct ras_manager *con_obj, *ip_obj, *tmp;
1490
1491 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1492 list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
1493 ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
1494 if (ip_obj)
1495 put_obj(ip_obj);
1496 }
1497 }
1498
c030f2e4 1499 amdgpu_ras_sysfs_remove_all(adev);
1500 return 0;
1501}
1502/* ras fs end */
1503
1504/* ih begin */
1505static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
1506{
1507 struct ras_ih_data *data = &obj->ih_data;
1508 struct amdgpu_iv_entry entry;
1509 int ret;
cf04dfd0 1510 struct ras_err_data err_data = {0, 0, 0, NULL};
c030f2e4 1511
1512 while (data->rptr != data->wptr) {
1513 rmb();
1514 memcpy(&entry, &data->ring[data->rptr],
1515 data->element_size);
1516
1517 wmb();
1518 data->rptr = (data->aligned_element_size +
1519 data->rptr) % data->ring_size;
1520
c030f2e4 1521 if (data->cb) {
f524dd54
TZ
1522 if (amdgpu_ras_is_poison_mode_supported(obj->adev) &&
1523 obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1524 dev_info(obj->adev->dev,
1525 "Poison is created, no user action is needed.\n");
1526 else {
 1527 /* Let the IP handle its data; maybe we need to get the output
 1528 * from the callback to update the error type/count, etc.
1529 */
b54ce6c9 1530 memset(&err_data, 0, sizeof(err_data));
f524dd54
TZ
1531 ret = data->cb(obj->adev, &err_data, &entry);
1532 /* ue will trigger an interrupt, and in that case
 1533 * we need to do a reset to recover the whole system.
 1534 * But leave the IP to do that recovery; here we just dispatch
1535 * the error.
51437623 1536 */
f524dd54
TZ
1537 if (ret == AMDGPU_RAS_SUCCESS) {
1538 /* these counts could be left as 0 if
1539 * some blocks do not count error number
1540 */
1541 obj->err_data.ue_count += err_data.ue_count;
1542 obj->err_data.ce_count += err_data.ce_count;
1543 }
c030f2e4 1544 }
c030f2e4 1545 }
1546 }
1547}
1548
1549static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
1550{
1551 struct ras_ih_data *data =
1552 container_of(work, struct ras_ih_data, ih_work);
1553 struct ras_manager *obj =
1554 container_of(data, struct ras_manager, ih_data);
1555
1556 amdgpu_ras_interrupt_handler(obj);
1557}
1558
1559int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
1560 struct ras_dispatch_if *info)
1561{
1562 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1563 struct ras_ih_data *data = &obj->ih_data;
1564
1565 if (!obj)
1566 return -EINVAL;
1567
1568 if (data->inuse == 0)
1569 return 0;
1570
1571 /* Might be overflow... */
1572 memcpy(&data->ring[data->wptr], info->entry,
1573 data->element_size);
1574
1575 wmb();
1576 data->wptr = (data->aligned_element_size +
1577 data->wptr) % data->ring_size;
1578
1579 schedule_work(&data->ih_work);
1580
1581 return 0;
1582}
1583
1584int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
1585 struct ras_ih_if *info)
1586{
1587 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1588 struct ras_ih_data *data;
1589
1590 if (!obj)
1591 return -EINVAL;
1592
1593 data = &obj->ih_data;
1594 if (data->inuse == 0)
1595 return 0;
1596
1597 cancel_work_sync(&data->ih_work);
1598
1599 kfree(data->ring);
1600 memset(data, 0, sizeof(*data));
1601 put_obj(obj);
1602
1603 return 0;
1604}
1605
1606int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
1607 struct ras_ih_if *info)
1608{
1609 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1610 struct ras_ih_data *data;
1611
1612 if (!obj) {
 1613 /* in case we register the IH before enabling the ras feature */
1614 obj = amdgpu_ras_create_obj(adev, &info->head);
1615 if (!obj)
1616 return -EINVAL;
1617 } else
1618 get_obj(obj);
1619
1620 data = &obj->ih_data;
 1621 /* add the callback etc. */
1622 *data = (struct ras_ih_data) {
1623 .inuse = 0,
1624 .cb = info->cb,
1625 .element_size = sizeof(struct amdgpu_iv_entry),
1626 .rptr = 0,
1627 .wptr = 0,
1628 };
1629
1630 INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
1631
1632 data->aligned_element_size = ALIGN(data->element_size, 8);
1633 /* the ring can store 64 iv entries. */
1634 data->ring_size = 64 * data->aligned_element_size;
1635 data->ring = kmalloc(data->ring_size, GFP_KERNEL);
1636 if (!data->ring) {
1637 put_obj(obj);
1638 return -ENOMEM;
1639 }
1640
1641 /* IH is ready */
1642 data->inuse = 1;
1643
1644 return 0;
1645}
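/*
 * Sketch of how an IP block would hook into this ring (the callback
 * name is hypothetical; .head and .cb follow struct ras_ih_if as used
 * above):
 *
 *	struct ras_ih_if ih_info = {
 *		.head = { .block = AMDGPU_RAS_BLOCK__UMC },
 *		.cb = my_umc_process_ras_data_cb,	/* hypothetical handler */
 *	};
 *
 *	r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
 */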
1646
1647static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
1648{
1649 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1650 struct ras_manager *obj, *tmp;
1651
1652 list_for_each_entry_safe(obj, tmp, &con->head, node) {
1653 struct ras_ih_if info = {
1654 .head = obj->head,
1655 };
1656 amdgpu_ras_interrupt_remove_handler(adev, &info);
1657 }
1658
1659 return 0;
1660}
1661/* ih end */
1662
313c8fd3
GC
 1663/* traverse all IPs except NBIO to query error counters */
1664static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
1665{
1666 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1667 struct ras_manager *obj;
1668
8ab0d6f0 1669 if (!adev->ras_enabled || !con)
313c8fd3
GC
1670 return;
1671
1672 list_for_each_entry(obj, &con->head, node) {
1673 struct ras_query_if info = {
1674 .head = obj->head,
1675 };
1676
1677 /*
1678 * PCIE_BIF IP has one different isr by ras controller
1679 * interrupt, the specific ras counter query will be
1680 * done in that isr. So skip such block from common
1681 * sync flood interrupt isr calling.
1682 */
1683 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
1684 continue;
1685
cf63b702
SY
1686 /*
 1687 * this is a workaround for aldebaran: skip sending the msg to
 1688 * smu to get the ecc_info table, because smu handling of the
 1689 * ecc_info table query fails temporarily.
 1690 * should be removed once smu fixes handling of the ecc_info table.
1691 */
1692 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
1693 (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))
1694 continue;
1695
761d86d3 1696 amdgpu_ras_query_error_status(adev, &info);
313c8fd3
GC
1697 }
1698}
1699
3f975d0f 1700/* Parse RdRspStatus and WrRspStatus */
cd92df93
LJ
1701static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
1702 struct ras_query_if *info)
3f975d0f 1703{
8b0fb0e9 1704 struct amdgpu_ras_block_object *block_obj;
3f975d0f
SY
1705 /*
1706 * Only two block need to query read/write
1707 * RspStatus at current state
1708 */
5e67bba3 1709 if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
1710 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
1711 return ;
1712
1713 block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, info->head.sub_block_index);
1714 if (!block_obj || !block_obj->hw_ops) {
1715 dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head));
1716 return ;
3f975d0f 1717 }
5e67bba3 1718
1719 if (block_obj->hw_ops->query_ras_error_status)
ab3b9de6 1720 block_obj->hw_ops->query_ras_error_status(adev);
5e67bba3 1721
3f975d0f
SY
1722}
1723
1724static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
1725{
1726 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1727 struct ras_manager *obj;
1728
8ab0d6f0 1729 if (!adev->ras_enabled || !con)
3f975d0f
SY
1730 return;
1731
1732 list_for_each_entry(obj, &con->head, node) {
1733 struct ras_query_if info = {
1734 .head = obj->head,
1735 };
1736
1737 amdgpu_ras_error_status_query(adev, &info);
1738 }
1739}
1740
c030f2e4 1741/* recovery begin */
466b1793 1742
1743/* return 0 on success.
 1744 * caller needs to free bps.
1745 */
1746static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1747 struct ras_badpage **bps, unsigned int *count)
1748{
1749 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1750 struct ras_err_handler_data *data;
1751 int i = 0;
732f2a30 1752 int ret = 0, status;
466b1793 1753
1754 if (!con || !con->eh_data || !bps || !count)
1755 return -EINVAL;
1756
1757 mutex_lock(&con->recovery_lock);
1758 data = con->eh_data;
1759 if (!data || data->count == 0) {
1760 *bps = NULL;
46cf2fec 1761 ret = -EINVAL;
466b1793 1762 goto out;
1763 }
1764
1765 *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
1766 if (!*bps) {
1767 ret = -ENOMEM;
1768 goto out;
1769 }
1770
1771 for (; i < data->count; i++) {
1772 (*bps)[i] = (struct ras_badpage){
9dc23a63 1773 .bp = data->bps[i].retired_page,
466b1793 1774 .size = AMDGPU_GPU_PAGE_SIZE,
52dd95f2 1775 .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
466b1793 1776 };
ec6aae97 1777 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
676deb38 1778 data->bps[i].retired_page);
732f2a30 1779 if (status == -EBUSY)
52dd95f2 1780 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
732f2a30 1781 else if (status == -ENOENT)
52dd95f2 1782 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
466b1793 1783 }
1784
1785 *count = data->count;
1786out:
1787 mutex_unlock(&con->recovery_lock);
1788 return ret;
1789}
1790
c030f2e4 1791static void amdgpu_ras_do_recovery(struct work_struct *work)
1792{
1793 struct amdgpu_ras *ras =
1794 container_of(work, struct amdgpu_ras, recovery_work);
b3dbd6d3
JC
1795 struct amdgpu_device *remote_adev = NULL;
1796 struct amdgpu_device *adev = ras->adev;
1797 struct list_head device_list, *device_list_handle = NULL;
b3dbd6d3 1798
f75e94d8 1799 if (!ras->disable_ras_err_cnt_harvest) {
d95e8e97
DL
1800 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
1801
f75e94d8
GC
1802 /* Build list of devices to query RAS related errors */
1803 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
1804 device_list_handle = &hive->device_list;
1805 } else {
1806 INIT_LIST_HEAD(&device_list);
1807 list_add_tail(&adev->gmc.xgmi.head, &device_list);
1808 device_list_handle = &device_list;
1809 }
c030f2e4 1810
f75e94d8 1811 list_for_each_entry(remote_adev,
3f975d0f
SY
1812 device_list_handle, gmc.xgmi.head) {
1813 amdgpu_ras_query_err_status(remote_adev);
f75e94d8 1814 amdgpu_ras_log_on_err_counter(remote_adev);
3f975d0f 1815 }
d95e8e97
DL
1816
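/* drop the reference taken by amdgpu_get_xgmi_hive() above */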
1817 amdgpu_put_xgmi_hive(hive);
b3dbd6d3 1818 }
313c8fd3 1819
93af20f7 1820 if (amdgpu_device_should_recover_gpu(ras->adev))
2f530724 1821 amdgpu_device_gpu_recover(ras->adev, NULL);
c030f2e4 1822 atomic_set(&ras->in_recovery, 0);
1823}
1824
c030f2e4 1825/* alloc/realloc bps array */
1826static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
1827 struct ras_err_handler_data *data, int pages)
1828{
1829 unsigned int old_space = data->count + data->space_left;
1830 unsigned int new_space = old_space + pages;
9dc23a63
TZ
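/* round the new size up to a multiple of 512 records so the array grows in chunks */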
1831 unsigned int align_space = ALIGN(new_space, 512);
1832 void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
9dc23a63 1833
676deb38 1834 if (!bps)
c030f2e4 1836 return -ENOMEM;
c030f2e4 1838
1839 if (data->bps) {
9dc23a63 1840 memcpy(bps, data->bps,
c030f2e4 1841 data->count * sizeof(*data->bps));
1842 kfree(data->bps);
1843 }
1844
9dc23a63 1845 data->bps = bps;
c030f2e4 1846 data->space_left += align_space - old_space;
1847 return 0;
1848}
1849
1850/* It deals with vram only. */
1851int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
9dc23a63 1852 struct eeprom_table_record *bps, int pages)
c030f2e4 1853{
1854 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
73aa8e1a 1855 struct ras_err_handler_data *data;
c030f2e4 1856 int ret = 0;
676deb38 1857 uint32_t i;
c030f2e4 1858
73aa8e1a 1859 if (!con || !con->eh_data || !bps || pages <= 0)
c030f2e4 1860 return 0;
1861
1862 mutex_lock(&con->recovery_lock);
73aa8e1a 1863 data = con->eh_data;
c030f2e4 1864 if (!data)
1865 goto out;
1866
676deb38
DL
1867 for (i = 0; i < pages; i++) {
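/* skip pages that are already recorded in the bad page array */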
1868 if (amdgpu_ras_check_bad_page_unlock(con,
1869 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
1870 continue;
1871
1872 if (!data->space_left &&
1873 amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
c030f2e4 1874 ret = -ENOMEM;
1875 goto out;
1876 }
1877
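/* reserve the retired page in the VRAM manager so it is never allocated again */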
ec6aae97 1878 amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
676deb38
DL
1879 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
1880 AMDGPU_GPU_PAGE_SIZE);
9dc23a63 1881
676deb38
DL
1882 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
1883 data->count++;
1884 data->space_left--;
1885 }
c030f2e4 1886out:
1887 mutex_unlock(&con->recovery_lock);
1888
1889 return ret;
1890}
1891
78ad00c9
TZ
1892/*
1893 * write the error record array to eeprom; access to eh_data is
1894 * protected by recovery_lock
1895 */
22503d80 1896int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
78ad00c9
TZ
1897{
1898 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1899 struct ras_err_handler_data *data;
8a3e801f 1900 struct amdgpu_ras_eeprom_control *control;
78ad00c9
TZ
1901 int save_count;
1902
1903 if (!con || !con->eh_data)
1904 return 0;
1905
d9a69fe5 1906 mutex_lock(&con->recovery_lock);
8a3e801f 1907 control = &con->eeprom_control;
78ad00c9 1908 data = con->eh_data;
0686627b 1909 save_count = data->count - control->ras_num_recs;
d9a69fe5 1910 mutex_unlock(&con->recovery_lock);
78ad00c9 1911 /* only new entries are saved */
b1628425 1912 if (save_count > 0) {
63d4c081
LT
1913 if (amdgpu_ras_eeprom_append(control,
1914 &data->bps[control->ras_num_recs],
1915 save_count)) {
6952e99c 1916 dev_err(adev->dev, "Failed to save EEPROM table data!");
78ad00c9
TZ
1917 return -EIO;
1918 }
1919
b1628425
GC
1920 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
1921 }
1922
78ad00c9
TZ
1923 return 0;
1924}
1925
1926/*
1927 * read the error record array from eeprom and reserve enough space
1928 * for storing new bad pages
1929 */
1930static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
1931{
1932 struct amdgpu_ras_eeprom_control *control =
6457205c 1933 &adev->psp.ras_context.ras->eeprom_control;
e4e6a589
LT
1934 struct eeprom_table_record *bps;
1935 int ret;
78ad00c9
TZ
1936
1937 /* no bad page record, skip eeprom access */
0686627b 1938 if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
e4e6a589 1939 return 0;
78ad00c9 1940
0686627b 1941 bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
78ad00c9
TZ
1942 if (!bps)
1943 return -ENOMEM;
1944
0686627b 1945 ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
e4e6a589 1946 if (ret)
6952e99c 1947 dev_err(adev->dev, "Failed to load EEPROM table records!");
e4e6a589 1948 else
0686627b 1949 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
78ad00c9 1950
78ad00c9
TZ
1951 kfree(bps);
1952 return ret;
1953}
1954
676deb38
DL
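/* the caller must hold con->recovery_lock */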
1955static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
1956 uint64_t addr)
1957{
1958 struct ras_err_handler_data *data = con->eh_data;
1959 int i;
1960
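/* compare page frame numbers; retired_page is stored as a page number */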
1961 addr >>= AMDGPU_GPU_PAGE_SHIFT;
1962 for (i = 0; i < data->count; i++)
1963 if (addr == data->bps[i].retired_page)
1964 return true;
1965
1966 return false;
1967}
1968
6e4be987
TZ
1969/*
1970 * check if an address belongs to a bad page
1971 *
1972 * Note: this check is only for the umc block
1973 */
1974static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
1975 uint64_t addr)
1976{
1977 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
6e4be987
TZ
1978 bool ret = false;
1979
1980 if (!con || !con->eh_data)
1981 return ret;
1982
1983 mutex_lock(&con->recovery_lock);
676deb38 1984 ret = amdgpu_ras_check_bad_page_unlock(con, addr);
6e4be987
TZ
1985 mutex_unlock(&con->recovery_lock);
1986 return ret;
1987}
1988
e5c04edf 1989static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
e4e6a589 1990 uint32_t max_count)
c84d4670 1991{
e5c04edf 1992 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
c84d4670
GC
1993
1994 /*
1995 * Justification of value bad_page_cnt_threshold in ras structure
1996 *
1997 * Generally, -1 <= amdgpu_bad_page_threshold <= max record length
1998 * in eeprom, and introduce two scenarios accordingly.
1999 *
2000 * Bad page retirement enablement:
2001 * - If amdgpu_bad_page_threshold = -1,
2002 * bad_page_cnt_threshold = typical value by formula.
2003 *
2004 * - When the value from user is 0 < amdgpu_bad_page_threshold <
2005 * max record length in eeprom, use it directly.
2006 *
2007 * Bad page retirement disablement:
2008 * - If amdgpu_bad_page_threshold = 0, bad page retirement
2009 * functionality is disabled, and bad_page_cnt_threshold will
2010 * take no effect.
2011 */
2012
e4e6a589
LT
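/* by default, allow one bad page per RAS_BAD_PAGE_COVER of VRAM, capped at the EEPROM record limit */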
2013 if (amdgpu_bad_page_threshold < 0) {
2014 u64 val = adev->gmc.mc_vram_size;
c84d4670 2015
e4e6a589 2016 do_div(val, RAS_BAD_PAGE_COVER);
e5c04edf 2017 con->bad_page_cnt_threshold = min(lower_32_bits(val),
e4e6a589 2018 max_count);
e5c04edf 2019 } else {
e4e6a589
LT
2020 con->bad_page_cnt_threshold = min_t(int, max_count,
2021 amdgpu_bad_page_threshold);
c84d4670
GC
2022 }
2023}
2024
1a6fc071 2025int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
c030f2e4 2026{
2027 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4d1337d2 2028 struct ras_err_handler_data **data;
e4e6a589 2029 u32 max_eeprom_records_count = 0;
b82e65a9 2030 bool exc_err_limit = false;
78ad00c9 2031 int ret;
c030f2e4 2032
1d9d2ca8
LT
2033 if (!con)
2034 return 0;
2035
2036 /* Allow access to RAS EEPROM via debugfs, when the ASIC
2037 * supports RAS and debugfs is enabled, but when
2038 * adev->ras_enabled is unset, i.e. when "ras_enable"
2039 * module parameter is set to 0.
2040 */
2041 con->adev = adev;
2042
2043 if (!adev->ras_enabled)
4d1337d2
AG
2044 return 0;
2045
1d9d2ca8 2046 data = &con->eh_data;
1a6fc071
TZ
2047 *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
2048 if (!*data) {
2049 ret = -ENOMEM;
2050 goto out;
2051 }
c030f2e4 2052
2053 mutex_init(&con->recovery_lock);
2054 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
2055 atomic_set(&con->in_recovery, 0);
c030f2e4 2056
e4e6a589
LT
2057 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count();
2058 amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
c84d4670 2059
e5086659 2060 /* Todo: during testing, the SMU might fail to read the eeprom through I2C
2061 * when the GPU is pending an XGMI reset at probe time
2062 * (mostly after the second bus reset); skip it for now.
2063 */
2064 if (adev->gmc.xgmi.pending_reset)
2065 return 0;
b82e65a9
GC
2066 ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
2067 /*
2068 * The eeprom init is treated as failed when exc_err_limit is true
2069 * or ret != 0.
2070 */
2071 if (exc_err_limit || ret)
1a6fc071 2072 goto free;
78ad00c9 2073
0686627b 2074 if (con->eeprom_control.ras_num_recs) {
78ad00c9
TZ
2075 ret = amdgpu_ras_load_bad_pages(adev);
2076 if (ret)
1a6fc071 2077 goto free;
513befa6 2078
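/* report the number of bad pages recorded in eeprom to the SMU */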
bc143d8b 2079 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
78ad00c9 2080 }
c030f2e4 2081
12b2cab7
MJ
2082#ifdef CONFIG_X86_MCE_AMD
2083 if ((adev->asic_type == CHIP_ALDEBARAN) &&
2084 (adev->gmc.xgmi.connected_to_cpu))
91a1a52d 2085 amdgpu_register_bad_pages_mca_notifier(adev);
12b2cab7 2086#endif
c030f2e4 2087 return 0;
1a6fc071 2088
1a6fc071 2089free:
1a6fc071 2090 kfree((*data)->bps);
1a6fc071 2091 kfree(*data);
1995b3a3 2092 con->eh_data = NULL;
1a6fc071 2093out:
cf696091 2094 dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
1a6fc071 2095
b82e65a9
GC
2096 /*
2097 * Except error threshold exceeding case, other failure cases in this
2098 * function would not fail amdgpu driver init.
2099 */
2100 if (!exc_err_limit)
2101 ret = 0;
2102 else
2103 ret = -EINVAL;
2104
1a6fc071 2105 return ret;
c030f2e4 2106}
2107
2108static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2109{
2110 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2111 struct ras_err_handler_data *data = con->eh_data;
2112
1a6fc071
TZ
2113 /* recovery_init failed to init it, fini is useless */
2114 if (!data)
2115 return 0;
2116
c030f2e4 2117 cancel_work_sync(&con->recovery_work);
c030f2e4 2118
2119 mutex_lock(&con->recovery_lock);
2120 con->eh_data = NULL;
2121 kfree(data->bps);
2122 kfree(data);
2123 mutex_unlock(&con->recovery_lock);
2124
2125 return 0;
2126}
2127/* recovery end */
2128
084e2640 2129static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
5436ab94 2130{
084e2640
LT
2131 return adev->asic_type == CHIP_VEGA10 ||
2132 adev->asic_type == CHIP_VEGA20 ||
2133 adev->asic_type == CHIP_ARCTURUS ||
75f06251 2134 adev->asic_type == CHIP_ALDEBARAN ||
084e2640 2135 adev->asic_type == CHIP_SIENNA_CICHLID;
5436ab94
SY
2136}
2137
f50160cf
SY
2138/*
2139 * This is a workaround for the vega20 workstation sku:
2140 * force enable gfx ras and ignore the vbios gfx ras flag,
2141 * because GC EDC can not be written.
2142 */
e509965e 2143static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
f50160cf
SY
2144{
2145 struct atom_context *ctx = adev->mode_info.atom_context;
2146
2147 if (!ctx)
2148 return;
2149
2150 if (strnstr(ctx->vbios_version, "D16406",
e11d5e0d
SY
2151 sizeof(ctx->vbios_version)) ||
2152 strnstr(ctx->vbios_version, "D36002",
2153 sizeof(ctx->vbios_version)))
8ab0d6f0 2154 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
f50160cf
SY
2155}
2156
5caf466a 2157/*
2158 * Check the hardware's ras ability, which will be saved in hw_supported.
2159 * If the hardware does not support ras, we can skip some ras initialization
2160 * and forbid some ras operations from IPs.
2161 * If software itself, say a boot parameter, limits the ras ability, we still
2162 * need to allow IPs to do some limited operations, like disable. In such a
2163 * case we have to initialize ras as normal, but need to check whether the
2164 * operation is allowed or not in each function.
2165 */
e509965e 2166static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
c030f2e4 2167{
8ab0d6f0 2168 adev->ras_hw_enabled = adev->ras_enabled = 0;
c030f2e4 2169
88474cca 2170 if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
084e2640 2171 !amdgpu_ras_asic_supported(adev))
5caf466a 2172 return;
b404ae82 2173
75f06251
HZ
2174 if (!adev->gmc.xgmi.connected_to_cpu) {
2175 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2176 dev_info(adev->dev, "MEM ECC is active.\n");
8ab0d6f0 2177 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
e509965e 2178 1 << AMDGPU_RAS_BLOCK__DF);
75f06251
HZ
2179 } else {
2180 dev_info(adev->dev, "MEM ECC is not presented.\n");
2181 }
88474cca 2182
75f06251
HZ
2183 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2184 dev_info(adev->dev, "SRAM ECC is active.\n");
8ab0d6f0 2185 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
e509965e 2186 1 << AMDGPU_RAS_BLOCK__DF);
75f06251
HZ
2187 } else {
2188 dev_info(adev->dev, "SRAM ECC is not presented.\n");
2189 }
2190 } else {
2191 /* driver only manages the ras feature of a few IP blocks
2192 * when the GPU is connected to the cpu through XGMI */
8ab0d6f0 2193 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
e509965e
LT
2194 1 << AMDGPU_RAS_BLOCK__SDMA |
2195 1 << AMDGPU_RAS_BLOCK__MMHUB);
75f06251 2196 }
88474cca 2197
e509965e 2198 amdgpu_ras_get_quirks(adev);
f50160cf 2199
88474cca 2200 /* hw_supported needs to be aligned with RAS block mask. */
8ab0d6f0 2201 adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
b404ae82 2202
8ab0d6f0
LT
2203 adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
2204 adev->ras_hw_enabled & amdgpu_ras_mask;
c030f2e4 2205}
2206
05adfd80
LT
2207static void amdgpu_ras_counte_dw(struct work_struct *work)
2208{
2209 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2210 ras_counte_delay_work.work);
2211 struct amdgpu_device *adev = con->adev;
a3fbb0d8 2212 struct drm_device *dev = adev_to_drm(adev);
05adfd80
LT
2213 unsigned long ce_count, ue_count;
2214 int res;
2215
2216 res = pm_runtime_get_sync(dev->dev);
2217 if (res < 0)
2218 goto Out;
2219
2220 /* Cache new values.
2221 */
4d9f771e
LT
2222 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
2223 atomic_set(&con->ras_ce_count, ce_count);
2224 atomic_set(&con->ras_ue_count, ue_count);
2225 }
05adfd80
LT
2226
2227 pm_runtime_mark_last_busy(dev->dev);
2228Out:
2229 pm_runtime_put_autosuspend(dev->dev);
2230}
2231
c030f2e4 2232int amdgpu_ras_init(struct amdgpu_device *adev)
2233{
2234 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4e644fff 2235 int r;
e4348849 2236 bool df_poison, umc_poison;
c030f2e4 2237
b404ae82 2238 if (con)
c030f2e4 2239 return 0;
2240
2241 con = kmalloc(sizeof(struct amdgpu_ras) +
640ae42e
JC
2242 sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
2243 sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
c030f2e4 2244 GFP_KERNEL|__GFP_ZERO);
2245 if (!con)
2246 return -ENOMEM;
2247
05adfd80
LT
2248 con->adev = adev;
2249 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
2250 atomic_set(&con->ras_ce_count, 0);
2251 atomic_set(&con->ras_ue_count, 0);
2252
c030f2e4 2253 con->objs = (struct ras_manager *)(con + 1);
2254
2255 amdgpu_ras_set_context(adev, con);
2256
e509965e
LT
2257 amdgpu_ras_check_supported(adev);
2258
7ddd9770 2259 if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
970fd197
SY
2260 /* set the gfx block ras context feature for VEGA20 Gaming;
2261 * send a ras disable cmd to the ras ta during ras late init.
2262 */
8ab0d6f0 2263 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
970fd197
SY
2264 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
2265
2266 return 0;
2267 }
2268
5e91160a 2269 r = 0;
5436ab94 2270 goto release_con;
fb2a3607
HZ
2271 }
2272
c030f2e4 2273 con->features = 0;
2274 INIT_LIST_HEAD(&con->head);
108c6a63 2275 /* Might need to get this flag from vbios. */
2276 con->flags = RAS_DEFAULT_FLAGS;
c030f2e4 2277
6e36f231
HZ
2278 /* initialize nbio ras function ahead of any other
2279 * ras functions so hardware fatal error interrupt
2280 * can be enabled as early as possible */
2281 switch (adev->asic_type) {
2282 case CHIP_VEGA20:
2283 case CHIP_ARCTURUS:
2284 case CHIP_ALDEBARAN:
2e54fe5d 2285 if (!adev->gmc.xgmi.connected_to_cpu) {
2286 adev->nbio.ras = &nbio_v7_4_ras;
2287 amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block);
2288 }
6e36f231
HZ
2289 break;
2290 default:
2291 /* nbio ras is not available */
2292 break;
2293 }
2294
2e54fe5d 2295 if (adev->nbio.ras &&
2296 adev->nbio.ras->init_ras_controller_interrupt) {
2297 r = adev->nbio.ras->init_ras_controller_interrupt(adev);
4e644fff 2298 if (r)
5436ab94 2299 goto release_con;
4e644fff
HZ
2300 }
2301
2e54fe5d 2302 if (adev->nbio.ras &&
2303 adev->nbio.ras->init_ras_err_event_athub_interrupt) {
2304 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
4e644fff 2305 if (r)
5436ab94 2306 goto release_con;
4e644fff
HZ
2307 }
2308
e4348849 2309 /* Init poison supported flag, the default value is false */
655ff353
TZ
2310 if (adev->gmc.xgmi.connected_to_cpu) {
2311 /* enabled by default when GPU is connected to CPU */
2312 con->poison_supported = true;
2313 }
2314 else if (adev->df.funcs &&
e4348849 2315 adev->df.funcs->query_ras_poison_mode &&
efe17d5a 2316 adev->umc.ras &&
2317 adev->umc.ras->query_ras_poison_mode) {
e4348849
TZ
2318 df_poison =
2319 adev->df.funcs->query_ras_poison_mode(adev);
2320 umc_poison =
efe17d5a 2321 adev->umc.ras->query_ras_poison_mode(adev);
e4348849
TZ
2322 /* Only poison is set in both DF and UMC, we can support it */
2323 if (df_poison && umc_poison)
2324 con->poison_supported = true;
2325 else if (df_poison != umc_poison)
2326 dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
2327 df_poison, umc_poison);
2328 }
2329
5e91160a
GC
2330 if (amdgpu_ras_fs_init(adev)) {
2331 r = -EINVAL;
5436ab94 2332 goto release_con;
5e91160a 2333 }
c030f2e4 2334
6952e99c 2335 dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
e509965e 2336 "hardware ability[%x] ras_mask[%x]\n",
8ab0d6f0 2337 adev->ras_hw_enabled, adev->ras_enabled);
e509965e 2338
c030f2e4 2339 return 0;
5436ab94 2340release_con:
c030f2e4 2341 amdgpu_ras_set_context(adev, NULL);
2342 kfree(con);
2343
5e91160a 2344 return r;
c030f2e4 2345}
2346
8f6368a9 2347int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
134d16d5
JC
2348{
2349 if (adev->gmc.xgmi.connected_to_cpu)
2350 return 1;
2351 return 0;
2352}
2353
2354static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
2355 struct ras_common_if *ras_block)
2356{
2357 struct ras_query_if info = {
2358 .head = *ras_block,
2359 };
2360
2361 if (!amdgpu_persistent_edc_harvesting_supported(adev))
2362 return 0;
2363
2364 if (amdgpu_ras_query_error_status(adev, &info) != 0)
2365 DRM_WARN("RAS init harvest failure");
2366
2367 if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
2368 DRM_WARN("RAS init harvest reset failure");
2369
2370 return 0;
2371}
2372
e4348849
TZ
2373bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
2374{
2375 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2376
2377 if (!con)
2378 return false;
2379
2380 return con->poison_supported;
2381}
2382
b293e891
HZ
2383/* helper function to handle common stuff in ip late init phase */
2384int amdgpu_ras_late_init(struct amdgpu_device *adev,
2385 struct ras_common_if *ras_block,
2386 struct ras_fs_if *fs_info,
2387 struct ras_ih_if *ih_info)
2388{
05adfd80
LT
2389 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2390 unsigned long ue_count, ce_count;
b293e891
HZ
2391 int r;
2392
2393 /* disable RAS feature per IP block if it is not supported */
2394 if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
2395 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
2396 return 0;
2397 }
2398
2399 r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
2400 if (r) {
9080a18f 2401 if (adev->in_suspend || amdgpu_in_reset(adev)) {
b293e891
HZ
2402 /* in the resume phase, if we fail to enable ras,
2403 * clean up all ras fs nodes and disable ras */
2404 goto cleanup;
2405 } else
2406 return r;
2407 }
2408
134d16d5
JC
2409 /* check for errors on warm reset for ASICs with persistent EDC support */
2410 amdgpu_persistent_edc_harvesting(adev, ras_block);
2411
b293e891 2412 /* in resume phase, no need to create ras fs node */
53b3f8f4 2413 if (adev->in_suspend || amdgpu_in_reset(adev))
b293e891
HZ
2414 return 0;
2415
2416 if (ih_info->cb) {
2417 r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
2418 if (r)
2419 goto interrupt;
2420 }
2421
b293e891
HZ
2422 r = amdgpu_ras_sysfs_create(adev, fs_info);
2423 if (r)
2424 goto sysfs;
2425
05adfd80
LT
2426 /* Those are the cached values at init.
2427 */
4d9f771e
LT
2428 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
2429 atomic_set(&con->ras_ce_count, ce_count);
2430 atomic_set(&con->ras_ue_count, ue_count);
2431 }
05adfd80 2432
b293e891
HZ
2433 return 0;
2434cleanup:
2435 amdgpu_ras_sysfs_remove(adev, ras_block);
2436sysfs:
b293e891
HZ
2437 if (ih_info->cb)
2438 amdgpu_ras_interrupt_remove_handler(adev, ih_info);
2439interrupt:
2440 amdgpu_ras_feature_enable(adev, ras_block, 0);
2441 return r;
2442}
2443
2444/* helper function to remove ras fs node and interrupt handler */
2445void amdgpu_ras_late_fini(struct amdgpu_device *adev,
2446 struct ras_common_if *ras_block,
2447 struct ras_ih_if *ih_info)
2448{
2449 if (!ras_block || !ih_info)
2450 return;
2451
2452 amdgpu_ras_sysfs_remove(adev, ras_block);
b293e891 2453 if (ih_info->cb)
f3729f7b 2454 amdgpu_ras_interrupt_remove_handler(adev, ih_info);
b293e891
HZ
2455}
2456
a564808e 2457/* do some init work after IP late init as dependence.
511fdbc3 2458 * and it runs in resume/gpu reset/booting up cases.
a564808e 2459 */
511fdbc3 2460void amdgpu_ras_resume(struct amdgpu_device *adev)
108c6a63 2461{
2462 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2463 struct ras_manager *obj, *tmp;
2464
8ab0d6f0 2465 if (!adev->ras_enabled || !con) {
970fd197
SY
2466 /* clean the ras context for VEGA20 Gaming after sending the ras disable cmd */
2467 amdgpu_release_ras_context(adev);
2468
108c6a63 2469 return;
970fd197 2470 }
108c6a63 2471
108c6a63 2472 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
191051a1 2473 /* Set up all other IPs which are not implemented. The tricky
2474 * part is that an IP's actual ras error type should be
2475 * MULTI_UNCORRECTABLE, but since the driver does not handle it,
2476 * ERROR_NONE makes sense anyway.
2477 */
2478 amdgpu_ras_enable_all_features(adev, 1);
2479
2480 /* We enable ras on all hw_supported blocks, but the boot
2481 * parameter might disable some of them and one or more IPs may
2482 * not be implemented yet. So we disable them on their behalf.
2483 */
108c6a63 2484 list_for_each_entry_safe(obj, tmp, &con->head, node) {
2485 if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
2486 amdgpu_ras_feature_enable(adev, &obj->head, 0);
2487 /* there should be no any reference. */
2488 WARN_ON(alive_obj(obj));
2489 }
191051a1 2490 }
108c6a63 2491 }
2492}
2493
511fdbc3 2494void amdgpu_ras_suspend(struct amdgpu_device *adev)
2495{
2496 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2497
8ab0d6f0 2498 if (!adev->ras_enabled || !con)
511fdbc3 2499 return;
2500
2501 amdgpu_ras_disable_all_features(adev, 0);
2502 /* Make sure all ras objects are disabled. */
2503 if (con->features)
2504 amdgpu_ras_disable_all_features(adev, 1);
2505}
2506
c030f2e4 2507/* do some fini work before IP fini as dependence */
2508int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
2509{
2510 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2511
8ab0d6f0 2512 if (!adev->ras_enabled || !con)
c030f2e4 2513 return 0;
2514
72c8c97b 2515
c030f2e4 2516 /* Need disable ras on all IPs here before ip [hw/sw]fini */
2517 amdgpu_ras_disable_all_features(adev, 0);
2518 amdgpu_ras_recovery_fini(adev);
2519 return 0;
2520}
2521
2522int amdgpu_ras_fini(struct amdgpu_device *adev)
2523{
2524 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2525
8ab0d6f0 2526 if (!adev->ras_enabled || !con)
c030f2e4 2527 return 0;
2528
2529 amdgpu_ras_fs_fini(adev);
2530 amdgpu_ras_interrupt_remove_all(adev);
2531
2532 WARN(con->features, "Feature mask is not cleared");
2533
2534 if (con->features)
2535 amdgpu_ras_disable_all_features(adev, 1);
2536
05adfd80
LT
2537 cancel_delayed_work_sync(&con->ras_counte_delay_work);
2538
c030f2e4 2539 amdgpu_ras_set_context(adev, NULL);
2540 kfree(con);
2541
2542 return 0;
2543}
7c6e68c7
AG
2544
2545void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
2546{
e509965e 2547 amdgpu_ras_check_supported(adev);
8ab0d6f0 2548 if (!adev->ras_hw_enabled)
ed606f8a
AG
2549 return;
2550
7c6e68c7 2551 if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
6952e99c
GC
2552 dev_info(adev->dev, "uncorrectable hardware error"
2553 "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
d5ea093e 2554
61934624 2555 amdgpu_ras_reset_gpu(adev);
7c6e68c7
AG
2556 }
2557}
bb5c7235
WS
2558
2559bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
2560{
2561 if (adev->asic_type == CHIP_VEGA20 &&
2562 adev->pm.fw_version <= 0x283400) {
2563 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
2564 amdgpu_ras_intr_triggered();
2565 }
2566
2567 return false;
2568}
970fd197
SY
2569
2570void amdgpu_release_ras_context(struct amdgpu_device *adev)
2571{
2572 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2573
2574 if (!con)
2575 return;
2576
8ab0d6f0 2577 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
970fd197
SY
2578 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
2579 amdgpu_ras_set_context(adev, NULL);
2580 kfree(con);
2581 }
2582}
12b2cab7
MJ
2583
2584#ifdef CONFIG_X86_MCE_AMD
2585static struct amdgpu_device *find_adev(uint32_t node_id)
2586{
12b2cab7
MJ
2587 int i;
2588 struct amdgpu_device *adev = NULL;
2589
91a1a52d
MJ
2590 for (i = 0; i < mce_adev_list.num_gpu; i++) {
2591 adev = mce_adev_list.devs[i];
12b2cab7 2592
91a1a52d 2593 if (adev && adev->gmc.xgmi.connected_to_cpu &&
12b2cab7
MJ
2594 adev->gmc.xgmi.physical_node_id == node_id)
2595 break;
2596 adev = NULL;
2597 }
2598
12b2cab7
MJ
2599 return adev;
2600}
2601
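/* extract the GPU id, UMC instance and channel index fields from the MCA_IPID register value */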
2602#define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF)
2603#define GET_UMC_INST(m) (((m) >> 21) & 0x7)
2604#define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
2605#define GPU_ID_OFFSET 8
2606
2607static int amdgpu_bad_page_notifier(struct notifier_block *nb,
2608 unsigned long val, void *data)
2609{
2610 struct mce *m = (struct mce *)data;
2611 struct amdgpu_device *adev = NULL;
2612 uint32_t gpu_id = 0;
2613 uint32_t umc_inst = 0;
2614 uint32_t ch_inst, channel_index = 0;
2615 struct ras_err_data err_data = {0, 0, 0, NULL};
2616 struct eeprom_table_record err_rec;
2617 uint64_t retired_page;
2618
2619 /*
2620 * Only process the error if it was generated in UMC_V2, which
2621 * belongs to the GPU UMCs, and occurred in DramECC (Extended
2622 * error code = 0); otherwise bail out.
2623 */
2624 if (!m || !((smca_get_bank_type(m->bank) == SMCA_UMC_V2) &&
2625 (XEC(m->status, 0x3f) == 0x0)))
2626 return NOTIFY_DONE;
2627
2628 /*
2629 * If it is correctable error, return.
2630 */
2631 if (mce_is_correctable(m))
2632 return NOTIFY_OK;
2633
2634 /*
2635 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
2636 */
2637 gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
2638
2639 adev = find_adev(gpu_id);
2640 if (!adev) {
2641 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
2642 gpu_id);
2643 return NOTIFY_DONE;
2644 }
2645
2646 /*
2647 * If it is uncorrectable error, then find out UMC instance and
2648 * channel index.
2649 */
2650 umc_inst = GET_UMC_INST(m->ipid);
2651 ch_inst = GET_CHAN_INDEX(m->ipid);
2652
2653 dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
2654 umc_inst, ch_inst);
2655
2656 memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
2657
2658 /*
2659 * Translate UMC channel address to Physical address
2660 */
2661 channel_index =
2662 adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num
2663 + ch_inst];
2664
2665 retired_page = ADDR_OF_8KB_BLOCK(m->addr) |
2666 ADDR_OF_256B_BLOCK(channel_index) |
2667 OFFSET_IN_256B_BLOCK(m->addr);
2668
2669 err_rec.address = m->addr;
2670 err_rec.retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
2671 err_rec.ts = (uint64_t)ktime_get_real_seconds();
2672 err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
2673 err_rec.cu = 0;
2674 err_rec.mem_channel = channel_index;
2675 err_rec.mcumc_id = umc_inst;
2676
2677 err_data.err_addr = &err_rec;
2678 err_data.err_addr_cnt = 1;
2679
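/* queue the bad page for retirement and persist it to the eeprom table */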
2680 if (amdgpu_bad_page_threshold != 0) {
2681 amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
2682 err_data.err_addr_cnt);
2683 amdgpu_ras_save_bad_pages(adev);
2684 }
2685
2686 return NOTIFY_OK;
2687}
2688
2689static struct notifier_block amdgpu_bad_page_nb = {
2690 .notifier_call = amdgpu_bad_page_notifier,
2691 .priority = MCE_PRIO_UC,
2692};
2693
91a1a52d 2694static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
12b2cab7 2695{
91a1a52d
MJ
2696 /*
2697 * Add the adev to the mce_adev_list.
2698 * During mode2 reset, amdgpu device is temporarily
2699 * removed from the mgpu_info list which can cause
2700 * page retirement to fail.
2701 * Use this list instead of mgpu_info to find the amdgpu
2702 * device on which the UMC error was reported.
2703 */
2704 mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
2705
12b2cab7
MJ
2706 /*
2707 * Register the x86 notifier only once
2708 * with MCE subsystem.
2709 */
2710 if (notifier_registered == false) {
2711 mce_register_decode_chain(&amdgpu_bad_page_nb);
2712 notifier_registered = true;
2713 }
2714}
2715#endif
7cab2124 2716
2717struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev)
2718{
2719 if (!adev)
2720 return NULL;
2721
2722 return adev->psp.ras_context.ras;
2723}
2724
2725int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras* ras_con)
2726{
2727 if (!adev)
69f91d32 2728 return -EINVAL;
7cab2124 2729
2730 adev->psp.ras_context.ras = ras_con;
2731 return 0;
2732}
2733
2734/* check if ras is supported on block, say, sdma, gfx */
2735int amdgpu_ras_is_supported(struct amdgpu_device *adev,
2736 unsigned int block)
2737{
2738 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2739
2740 if (block >= AMDGPU_RAS_BLOCK_COUNT)
2741 return 0;
2742 return ras && (adev->ras_enabled & (1 << block));
2743}
2744
2745int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
2746{
2747 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2748
2749 if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
2750 schedule_work(&ras->recovery_work);
2751 return 0;
2752}
2753
2754
6492e1b0 2755/* Register each ip ras block into amdgpu ras */
2756int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
2757 struct amdgpu_ras_block_object* ras_block_obj)
2758{
df4f0041 2759 struct amdgpu_ras_block_object *obj, *tmp;
6492e1b0 2760 if (!adev || !ras_block_obj)
2761 return -EINVAL;
2762
df01fe73 2763 if (!amdgpu_ras_asic_supported(adev))
2764 return 0;
2765
df4f0041 2766 /* If the ras object is in ras_list, don't add it again */
2767 list_for_each_entry_safe(obj, tmp, &adev->ras_list, node) {
2768 if (obj == ras_block_obj)
2769 return 0;
2771 }
2772
6492e1b0 2773 INIT_LIST_HEAD(&ras_block_obj->node);
2774 list_add_tail(&ras_block_obj->node, &adev->ras_list);
2775
2776 return 0;
2777}