/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
};

#define ras_err_str(i) (ras_error_string[ffs(i)])
#define ras_block_str(i) (ras_block_string[i])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)

/* typical ECC bad page rate: one bad page per 100 MB of VRAM */
#define RAS_BAD_PAGE_RATE		(100 * 1024 * 1024ULL)
enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
		uint64_t addr);

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
	if (adev && amdgpu_ras_get_context(adev))
		amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
	if (adev && amdgpu_ras_get_context(adev))
		return amdgpu_ras_get_context(adev)->error_query_ready;

	return false;
}
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
		size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_error_query(obj->adev, &info))
		return -EINVAL;

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);
	if (*pos >= s)
		return 0;

	s -= *pos;
	s = min_t(u64, s, size);
	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.write = NULL,
	.llseek = default_llseek
};
static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_str(i)) == 0)
			return 0;
	}

	return -EINVAL;
}
static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9] = "ue";
	int op = -1;
	int block_id;
	uint32_t sub_block;
	u64 address, value;

	if (*pos)
		return -EINVAL;
	*pos = size;

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ASCII string, but the command did not match */
		return -EINVAL;

	if (op != -1) {
		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		/* only ue and ce errors are supported */
		if (!memcmp("ue", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		else if (!memcmp("ce", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		else
			return -EINVAL;

		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s %u %llu %llu",
					&sub_block, &address, &value) != 3)
				if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
						&sub_block, &address, &value) != 3)
					return -EINVAL;
			data->head.sub_block_index = sub_block;
			data->inject.address = address;
			data->inject.value = value;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}
/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * It accepts a struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members, they are block, type, sub_block_index, name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, say, GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has two more members than head, they are address and value.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * Copy struct ras_debug_if into your code and initialize it.
 * Write the struct to the control node.
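 *
 * For example, a minimal userspace sketch of the struct-based path might
 * look like the following (illustrative only; it assumes the struct
 * ras_debug_if layout from amdgpu_ras.h and a debugfs path that can vary
 * by system):
 *
 * .. code-block:: c
 *
 *	struct ras_debug_if data = { 0 };
 *	int fd;
 *
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *	data.op = 2; /* 2 == inject, see ras_debug_if::op above */
 *	data.inject.address = 0x0;
 *	data.inject.value = 0x0;
 *
 *	fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *	write(fd, &data, sizeof(data));
 *	close(fd);
 *
 * Bash commands can drive the same node with a text command: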
 *
 * .. code-block:: bash
 *
 *	echo op block [error [sub_block address value]] > .../ras/ras_ctrl
 *
 * op: disable, enable, inject
 *	disable: only block is needed
 *	enable: block and error are needed
 *	inject: error, address, value are needed
 * block: umc, sdma, gfx, .........
 *	see ras_block_string[] for details
 * error: ue, ce
 *	ue: multi_uncorrectable
 *	ce: single_correctable
 * sub_block: sub block index, pass 0 if there is no sub block
 *
 * Here are some examples of bash commands:
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
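 *	# an enable command follows the same format (illustrative):
 *	echo enable umc ce > /sys/kernel/debug/dri/0/ras/ras_ctrl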
 *
 * How to check the result?
 *
 * For disable/enable, please check the ras features at
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * For inject, please check the corresponding error count at
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
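 *
 * For example (illustrative; the card index and block name vary by system):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/features
 *	cat /sys/class/drm/card0/device/ras/umc_err_count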
 *
 * Operations are only allowed on blocks which are supported.
 * Please check the ras mask at /sys/module/amdgpu/parameters/ras_mask
 * to see which blocks support RAS on a particular asic.
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
		size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev,
			 "RAS WARN: error injection currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return -EINVAL;

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			dev_warn(adev->dev,
				 "RAS WARN: input address 0x%llx is invalid.",
				 data.inject.address);
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			dev_warn(adev->dev,
				 "RAS WARN: 0x%llx has been marked as bad before error injection!\n",
				 data.inject.address);
			break;
		}

		/* data.inject.address is an offset rather than an absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return -EINVAL;

	return size;
}
/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experienced ECC errors in vram. This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset the EEPROM table to 0 entries.
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
		size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
			&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (ret == 1) {
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return -EIO;
	}
}
static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};
/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 *	[ue|ce]: count
 *
 * Example:
 *
 * .. code-block:: bash
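 *
 *	ue: 0
 *	ce: 1
 */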
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return snprintf(buf, PAGE_SIZE,
				"Query currently inaccessible\n");

	if (amdgpu_ras_error_query(obj->adev, &info))
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);
}
#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && --obj->use == 0)
		list_del(&obj->node);
	if (obj && obj->use < 0) {
		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", obj->head.name);
	}
}
/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	obj = &con->objs[head->block];
	/* already exists; don't create it again */
	if (alive_obj(obj))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}
/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		obj = &con->objs[head->block];

		if (alive_obj(obj)) {
			WARN_ON(head->block != obj->head.block);
			return obj;
		}
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj)) {
				WARN_ON(i != obj->head.block);
				return obj;
			}
		}
	}

	return NULL;
}
static void amdgpu_ras_parse_status_code(struct amdgpu_device *adev,
					 const char *invoke_type,
					 const char *block_name,
					 enum ta_ras_status ret)
{
	switch (ret) {
	case TA_RAS_STATUS__SUCCESS:
		return;
	case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE:
		dev_warn(adev->dev,
			 "RAS WARN: %s %s currently unavailable\n",
			 invoke_type,
			 block_name);
		break;
	default:
		dev_err(adev->dev,
			"RAS ERROR: %s %s error failed ret 0x%X\n",
			invoke_type,
			block_name,
			ret);
	}
}
/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->hw_supported & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If the hardware does not support ras, then do not create the obj.
	 * But if the hardware does support ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs to do
	 * the corresponding initialization.
	 * IP blocks check con->support to see if they need to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;
	if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we created the obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}
/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input *info;
	int ret;

	if (!con)
		return -EINVAL;

	info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (!enable) {
		info->disable_features = (struct ta_ras_disable_features_input) {
			.block_id = amdgpu_ras_block_to_ta(head->block),
			.error_type = amdgpu_ras_error_to_ta(head->type),
		};
	} else {
		info->enable_features = (struct ta_ras_enable_features_input) {
			.block_id = amdgpu_ras_block_to_ta(head->block),
			.error_type = amdgpu_ras_error_to_ta(head->type),
		};
	}

	/* Do not enable if it is not allowed. */
	WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
	/* Are we already in the state we are going to set? */
	if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) {
		ret = 0;
		goto out;
	}

	if (!amdgpu_ras_intr_triggered()) {
		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			amdgpu_ras_parse_status_code(adev,
						     enable ? "enable" : "disable",
						     ras_block_str(head->block),
						     (enum ta_ras_status)ret);
			if (ret == TA_RAS_STATUS__RESET_NEEDED)
				ret = -EAGAIN;
			else
				ret = -EINVAL;

			goto out;
		}
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);
	ret = 0;
out:
	kfree(info);
	return ret;
}
/* Only used in the device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm in issuing a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing.
			 * But sometimes it requests the driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With an old ras TA, we might fail to enable ras.
			 * Log it and just set up the object.
			 * TODO: remove this workaround in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
				if (!ret)
					dev_info(adev->dev,
						"RAS INFO: %s setup object\n",
						ras_block_str(head->block));
			}
		} else {
			/* setup the object then issue a ras TA disable cmd. */
			ret = __amdgpu_ras_feature_enable(adev, head, 1);
			if (ret)
				return ret;

			ret = amdgpu_ras_feature_enable(adev, head, 0);
		}
	} else
		ret = amdgpu_ras_feature_enable(adev, head, enable);

	return ret;
}
static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		/* bypass psp.
		 * aka just release the obj and corresponding flags
		 */
		if (bypass) {
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		}
	}

	return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
	int i;
	const enum amdgpu_ras_error_type default_ras_type =
		AMDGPU_RAS_ERROR__NONE;

	for (i = 0; i < ras_block_count; i++) {
		struct ras_common_if head = {
			.block = i,
			.type = default_ras_type,
			.sub_block_index = 0,
		};
		strcpy(head.name, ras_block_str(i));
		if (bypass) {
			/* bypass psp. vbios enables ras for us.
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */
/* query/inject/cure begin */
int amdgpu_ras_error_query(struct amdgpu_device *adev,
		struct ras_query_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data = {0, 0, 0, NULL};
	int i;

	if (!obj)
		return -EINVAL;

	switch (info->head.block) {
	case AMDGPU_RAS_BLOCK__UMC:
		if (adev->umc.funcs->query_ras_error_count)
			adev->umc.funcs->query_ras_error_count(adev, &err_data);
		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.funcs->query_ras_error_address)
			adev->umc.funcs->query_ras_error_address(adev, &err_data);
		break;
	case AMDGPU_RAS_BLOCK__SDMA:
		if (adev->sdma.funcs->query_ras_error_count) {
			for (i = 0; i < adev->sdma.num_instances; i++)
				adev->sdma.funcs->query_ras_error_count(adev, i,
									&err_data);
		}
		break;
	case AMDGPU_RAS_BLOCK__GFX:
		if (adev->gfx.funcs->query_ras_error_count)
			adev->gfx.funcs->query_ras_error_count(adev, &err_data);
		break;
	case AMDGPU_RAS_BLOCK__MMHUB:
		if (adev->mmhub.funcs->query_ras_error_count)
			adev->mmhub.funcs->query_ras_error_count(adev, &err_data);
		break;
	case AMDGPU_RAS_BLOCK__PCIE_BIF:
		if (adev->nbio.funcs->query_ras_error_count)
			adev->nbio.funcs->query_ras_error_count(adev, &err_data);
		break;
	case AMDGPU_RAS_BLOCK__XGMI_WAFL:
		amdgpu_xgmi_query_ras_error_count(adev, &err_data);
		break;
	default:
		break;
	}

	obj->err_data.ue_count += err_data.ue_count;
	obj->err_data.ce_count += err_data.ce_count;

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;

	if (err_data.ce_count) {
		dev_info(adev->dev,
			 "%ld correctable hardware errors detected in %s block, no user action is needed.\n",
			 obj->err_data.ce_count,
			 ras_block_str(info->head.block));
	}
	if (err_data.ue_count) {
		dev_info(adev->dev,
			 "%ld uncorrectable hardware errors detected in %s block\n",
			 obj->err_data.ue_count,
			 ras_block_str(info->head.block));
	}

	return 0;
}
/* Trigger XGMI/WAFL error */
static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
		struct ta_ras_trigger_error_input *block_info)
{
	int ret;

	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
		dev_warn(adev->dev, "Failed to disallow df cstate");

	if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
		dev_warn(adev->dev, "Failed to disallow XGMI power down");

	ret = psp_ras_trigger_error(&adev->psp, block_info);

	if (amdgpu_ras_intr_triggered())
		return ret;

	if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
		dev_warn(adev->dev, "Failed to allow XGMI power down");

	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
		dev_warn(adev->dev, "Failed to allow df cstate");

	return ret;
}
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = 0;

	if (!obj)
		return -EINVAL;

	/* Calculate the XGMI relative offset */
	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		block_info.address =
			amdgpu_xgmi_get_relative_phy_addr(adev,
							  block_info.address);
	}

	switch (info->head.block) {
	case AMDGPU_RAS_BLOCK__GFX:
		if (adev->gfx.funcs->ras_error_inject)
			ret = adev->gfx.funcs->ras_error_inject(adev, info);
		else
			ret = -EINVAL;
		break;
	case AMDGPU_RAS_BLOCK__UMC:
	case AMDGPU_RAS_BLOCK__MMHUB:
	case AMDGPU_RAS_BLOCK__PCIE_BIF:
		ret = psp_ras_trigger_error(&adev->psp, &block_info);
		break;
	case AMDGPU_RAS_BLOCK__XGMI_WAFL:
		ret = amdgpu_ras_error_inject_xgmi(adev, &block_info);
		break;
	default:
		dev_info(adev->dev, "%s error injection is not supported yet\n",
			 ras_block_str(info->head.block));
		ret = -EINVAL;
	}

	amdgpu_ras_parse_status_code(adev,
				     "inject",
				     ras_block_str(info->head.block),
				     (enum ta_ras_status)ret);

	return ret;
}
int amdgpu_ras_error_cure(struct amdgpu_device *adev,
		struct ras_cure_if *info)
{
	/* psp fw has no cure interface for now. */
	return 0;
}
/* get the total error counts on all IPs */
unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
		bool is_ce)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	struct ras_err_data data = {0, 0};

	if (!con)
		return 0;

	list_for_each_entry(obj, &con->head, node) {
		struct ras_query_if info = {
			.head = obj->head,
		};

		if (amdgpu_ras_error_query(adev, &info))
			return 0;

		data.ce_count += info.ce_count;
		data.ue_count += info.ue_count;
	}

	return is_ce ? data.ce_count : data.ue_count;
}
/* query/inject/cure end */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
	switch (flags) {
	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
		return "R";
	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
		return "P";
	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
	default:
		return "F";
	}
}
/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page has been reserved and is not available for use.
 *
 * P: pending for reserve, this gpu page is marked as bad and will be reserved
 * in the next window of page_reserve.
 *
 * F: unable to reserve. This gpu page couldn't be reserved for some reason.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *	0x00000001 : 0x00001000 : R
 *	0x00000002 : 0x00001000 : P
 */
static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t ppos, size_t count)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, badpages_attr);
	struct amdgpu_device *adev = con->adev;
	const unsigned int element_size =
		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
	unsigned int end = div64_ul(ppos + count - 1, element_size);
	ssize_t s = 0;
	struct ras_badpage *bps = NULL;
	unsigned int bps_count = 0;

	memset(buf, 0, count);

	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
		return 0;

	for (; start < end && start < bps_count; start++)
		s += scnprintf(&buf[s], element_size + 1,
				"0x%08x : 0x%08x : %1s\n",
				bps[start].bp,
				bps[start].size,
				amdgpu_ras_badpage_flags_str(bps[start].flags));

	kfree(bps);

	return s;
}

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, features_attr);

	return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
}
static void amdgpu_ras_sysfs_add_bad_page_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute_group group;
	struct bin_attribute *bin_attrs[] = {
		&con->badpages_attr,
		NULL,
	};

	con->badpages_attr = (struct bin_attribute) {
		.attr = {
			.name = "gpu_vram_bad_pages",
			.mode = S_IRUGO,
		},
		.size = 0,
		.private = NULL,
		.read = amdgpu_ras_sysfs_badpages_read,
	};

	group.name = RAS_FS_NAME;
	group.bin_attrs = bin_attrs;

	sysfs_bin_attr_init(bin_attrs[0]);

	sysfs_update_group(&adev->dev->kobj, &group);
}

static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct attribute_group group = {
		.name = RAS_FS_NAME,
		.attrs = attrs,
	};

	con->features_attr = (struct device_attribute) {
		.attr = {
			.name = "features",
			.mode = S_IRUGO,
		},
		.show = amdgpu_ras_sysfs_features_read,
	};

	sysfs_attr_init(attrs[0]);

	return sysfs_create_group(&adev->dev->kobj, &group);
}
static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	sysfs_remove_file_from_group(&adev->dev->kobj,
				&con->badpages_attr.attr,
				RAS_FS_NAME);
}

static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct attribute_group group = {
		.name = RAS_FS_NAME,
		.attrs = attrs,
	};

	sysfs_remove_group(&adev->dev->kobj, &group);

	return 0;
}
int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
		struct ras_fs_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

	if (!obj || obj->attr_inuse)
		return -EINVAL;

	get_obj(obj);

	memcpy(obj->fs_data.sysfs_name,
			head->sysfs_name,
			sizeof(obj->fs_data.sysfs_name));

	obj->sysfs_attr = (struct device_attribute){
		.attr = {
			.name = obj->fs_data.sysfs_name,
			.mode = S_IRUGO,
		},
		.show = amdgpu_ras_sysfs_read,
	};
	sysfs_attr_init(&obj->sysfs_attr.attr);

	if (sysfs_add_file_to_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME)) {
		put_obj(obj);
		return -EINVAL;
	}

	obj->attr_inuse = 1;

	return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->attr_inuse)
		return -EINVAL;

	sysfs_remove_file_from_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME);
	obj->attr_inuse = 0;
	put_obj(obj);

	return 0;
}
static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_sysfs_remove(adev, &obj->head);
	}

	if (amdgpu_bad_page_threshold != 0)
		amdgpu_ras_sysfs_remove_bad_page_node(adev);

	amdgpu_ras_sysfs_remove_feature_node(adev);

	return 0;
}
/**
 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
 *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover. However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically
 * in that event.
 *
 * The following file in debugfs provides that interface:
 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo true > .../ras/auto_reboot
 */
static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct drm_minor *minor = adev->ddev->primary;

	con->dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
				adev, &amdgpu_ras_debugfs_ctrl_ops);
	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir,
				adev, &amdgpu_ras_debugfs_eeprom_ops);

	/*
	 * After an uncorrectable error happens, GPU recovery is usually
	 * scheduled. But due to a known problem where GPU recovery fails to
	 * bring the GPU back, the interface below gives the user a direct way
	 * to reboot the system automatically when an ERREVENT_ATHUB_INTERRUPT
	 * is generated; the normal GPU recovery routine will then never be
	 * called.
	 */
	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir,
				&con->reboot);

	/*
	 * The user can set this so that the hardware's error count registers
	 * of RAS IPs are not cleaned up during ras recovery.
	 */
	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644,
			con->dir, &con->disable_ras_err_cnt_harvest);
}
void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
		struct ras_fs_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

	if (!obj || obj->ent)
		return;

	get_obj(obj);

	memcpy(obj->fs_data.debugfs_name,
			head->debugfs_name,
			sizeof(obj->fs_data.debugfs_name));

	obj->ent = debugfs_create_file(obj->fs_data.debugfs_name,
				       S_IWUGO | S_IRUGO, con->dir, obj,
				       &amdgpu_ras_debugfs_ops);
}
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	struct ras_fs_if fs_info;

	/*
	 * it won't be called in the resume path, no need to check
	 * suspend and gpu reset status
	 */
	if (!con)
		return;

	amdgpu_ras_debugfs_create_ctrl_node(adev);

	list_for_each_entry(obj, &con->head, node) {
		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
			(obj->attr_inuse == 1)) {
			sprintf(fs_info.debugfs_name, "%s_err_inject",
					ras_block_str(obj->head.block));
			fs_info.head = obj->head;
			amdgpu_ras_debugfs_create(adev, &fs_info);
		}
	}
#endif
}
void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->ent)
		return;

	obj->ent = NULL;
	put_obj(obj);
}

static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_debugfs_remove(adev, &obj->head);
	}
	con->dir = NULL;
#endif
}
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
	amdgpu_ras_sysfs_create_feature_node(adev);

	if (amdgpu_bad_page_threshold != 0)
		amdgpu_ras_sysfs_add_bad_page_node(adev);

	return 0;
}

static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
	amdgpu_ras_debugfs_remove_all(adev);
	amdgpu_ras_sysfs_remove_all(adev);

	return 0;
}
static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
{
	struct ras_ih_data *data = &obj->ih_data;
	struct amdgpu_iv_entry entry;
	int ret;
	struct ras_err_data err_data = {0, 0, 0, NULL};

	while (data->rptr != data->wptr) {
		rmb();
		memcpy(&entry, &data->ring[data->rptr],
				data->element_size);

		wmb();
		data->rptr = (data->aligned_element_size +
				data->rptr) % data->ring_size;

		/* Let the IP handle its data, maybe we need to get the output
		 * from the callback to update the error type/count, etc.
		 */
		if (data->cb) {
			ret = data->cb(obj->adev, &err_data, &entry);
			/* ue will trigger an interrupt, and in that case
			 * we need to do a reset to recover the whole system.
			 * But leave it to the IP to do that recovery; here we
			 * just dispatch the error.
			 */
			if (ret == AMDGPU_RAS_SUCCESS) {
				/* these counts could be left as 0 if
				 * some blocks do not count error numbers
				 */
				obj->err_data.ue_count += err_data.ue_count;
				obj->err_data.ce_count += err_data.ce_count;
			}
		}
	}
}

static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
{
	struct ras_ih_data *data =
		container_of(work, struct ras_ih_data, ih_work);
	struct ras_manager *obj =
		container_of(data, struct ras_manager, ih_data);

	amdgpu_ras_interrupt_handler(obj);
}
int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
		struct ras_dispatch_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data = &obj->ih_data;

	if (!obj)
		return -EINVAL;

	if (data->inuse == 0)
		return 0;

	/* Might be overflow... */
	memcpy(&data->ring[data->wptr], info->entry,
			data->element_size);

	wmb();
	data->wptr = (data->aligned_element_size +
			data->wptr) % data->ring_size;

	schedule_work(&data->ih_work);

	return 0;
}

int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
		struct ras_ih_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data;

	if (!obj)
		return -EINVAL;

	data = &obj->ih_data;
	if (data->inuse == 0)
		return 0;

	cancel_work_sync(&data->ih_work);

	kfree(data->ring);
	memset(data, 0, sizeof(*data));
	put_obj(obj);

	return 0;
}
int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
		struct ras_ih_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data;

	if (!obj) {
		/* in case we register the IH before enabling the ras feature */
		obj = amdgpu_ras_create_obj(adev, &info->head);
		if (!obj)
			return -EINVAL;
	} else
		get_obj(obj);

	data = &obj->ih_data;
	/* add the callback, etc. */
	*data = (struct ras_ih_data) {
		.inuse = 0,
		.cb = info->cb,
		.element_size = sizeof(struct amdgpu_iv_entry),
		.rptr = 0,
		.wptr = 0,
	};

	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);

	data->aligned_element_size = ALIGN(data->element_size, 8);
	/* the ring can store 64 iv entries. */
	data->ring_size = 64 * data->aligned_element_size;
	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
	if (!data->ring) {
		put_obj(obj);
		return -ENOMEM;
	}

	data->inuse = 1;

	return 0;
}
static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		struct ras_ih_if info = {
			.head = obj->head,
		};
		amdgpu_ras_interrupt_remove_handler(adev, &info);
	}

	return 0;
}

/* traverse all IPs except NBIO to query error counters */
static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!con)
		return;

	list_for_each_entry(obj, &con->head, node) {
		struct ras_query_if info = {
			.head = obj->head,
		};

		/*
		 * The PCIE_BIF block has a separate isr for the ras
		 * controller interrupt; its specific ras counter query
		 * is done in that isr. So skip that block in the common
		 * sync flood interrupt isr.
		 */
		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
			continue;

		amdgpu_ras_error_query(adev, &info);
	}
}
/* recovery begin */

/* return 0 on success.
 * the caller needs to free bps.
 */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	int i = 0;
	int ret = 0;

	if (!con || !con->eh_data || !bps || !count)
		return -EINVAL;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data || data->count == 0) {
		*count = 0;
		ret = -EINVAL;
		goto out;
	}

	*bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
	if (!*bps) {
		ret = -ENOMEM;
		goto out;
	}

	for (; i < data->count; i++) {
		(*bps)[i] = (struct ras_badpage){
			.bp = data->bps[i].retired_page,
			.size = AMDGPU_GPU_PAGE_SIZE,
			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
		};

		if (data->last_reserved <= i)
			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
		else if (data->bps_bo[i] == NULL)
			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
	}

	*count = data->count;
out:
	mutex_unlock(&con->recovery_lock);
	return ret;
}
static void amdgpu_ras_do_recovery(struct work_struct *work)
{
	struct amdgpu_ras *ras =
		container_of(work, struct amdgpu_ras, recovery_work);
	struct amdgpu_device *remote_adev = NULL;
	struct amdgpu_device *adev = ras->adev;
	struct list_head device_list, *device_list_handle = NULL;
	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);

	if (!ras->disable_ras_err_cnt_harvest) {
		/* Build the list of devices to query RAS related errors */
		if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
			device_list_handle = &hive->device_list;
		} else {
			INIT_LIST_HEAD(&device_list);
			list_add_tail(&adev->gmc.xgmi.head, &device_list);
			device_list_handle = &device_list;
		}

		list_for_each_entry(remote_adev,
				device_list_handle, gmc.xgmi.head)
			amdgpu_ras_log_on_err_counter(remote_adev);
	}

	if (amdgpu_device_should_recover_gpu(ras->adev))
		amdgpu_device_gpu_recover(ras->adev, NULL);
	atomic_set(&ras->in_recovery, 0);
}
/* alloc/realloc the bps array */
static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
		struct ras_err_handler_data *data, int pages)
{
	unsigned int old_space = data->count + data->space_left;
	unsigned int new_space = old_space + pages;
	unsigned int align_space = ALIGN(new_space, 512);
	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
	struct amdgpu_bo **bps_bo =
			kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL);

	if (!bps || !bps_bo) {
		kfree(bps);
		kfree(bps_bo);
		return -ENOMEM;
	}

	if (data->bps) {
		memcpy(bps, data->bps,
				data->count * sizeof(*data->bps));
		kfree(data->bps);
	}
	if (data->bps_bo) {
		memcpy(bps_bo, data->bps_bo,
				data->count * sizeof(*data->bps_bo));
		kfree(data->bps_bo);
	}

	data->bps = bps;
	data->bps_bo = bps_bo;
	data->space_left += align_space - old_space;
	return 0;
}
/* it deals with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
		struct eeprom_table_record *bps, int pages)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	int ret = 0;

	if (!con || !con->eh_data || !bps || pages <= 0)
		return 0;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data)
		goto out;

	if (data->space_left <= pages)
		if (amdgpu_ras_realloc_eh_data_space(adev, data, pages)) {
			ret = -ENOMEM;
			goto out;
		}

	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
	data->count += pages;
	data->space_left -= pages;
out:
	mutex_unlock(&con->recovery_lock);

	return ret;
}
/*
 * write the error record array to eeprom; the function should be
 * protected by recovery_lock
 */
static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	struct amdgpu_ras_eeprom_control *control;
	int save_count;

	if (!con || !con->eh_data)
		return 0;

	control = &con->eeprom_control;
	data = con->eh_data;
	save_count = data->count - control->num_recs;
	/* only new entries are saved */
	if (save_count > 0) {
		if (amdgpu_ras_eeprom_process_recods(control,
							&data->bps[control->num_recs],
							true,
							save_count)) {
			dev_err(adev->dev, "Failed to save EEPROM table data!");
			return -EIO;
		}

		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
	}

	return 0;
}
/*
 * read the error record array in eeprom and reserve enough space for
 * storing new bad pages
 */
static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
{
	struct amdgpu_ras_eeprom_control *control =
			&adev->psp.ras.ras->eeprom_control;
	struct eeprom_table_record *bps = NULL;
	int ret = 0;

	/* no bad page record, skip eeprom access */
	if (!control->num_recs || (amdgpu_bad_page_threshold == 0))
		return ret;

	bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
	if (!bps)
		return -ENOMEM;

	if (amdgpu_ras_eeprom_process_recods(control, bps, false,
		control->num_recs)) {
		dev_err(adev->dev, "Failed to load EEPROM table records!");
		ret = -EIO;
		goto out;
	}

	ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs);

out:
	kfree(bps);
	return ret;
}
/*
 * check if an address belongs to a bad page
 *
 * Note: this check is only for the umc block
 */
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
		uint64_t addr)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	int i;
	bool ret = false;

	if (!con || !con->eh_data)
		return ret;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data)
		goto out;

	addr >>= AMDGPU_GPU_PAGE_SHIFT;
	for (i = 0; i < data->count; i++)
		if (addr == data->bps[i].retired_page) {
			ret = true;
			goto out;
		}
out:
	mutex_unlock(&con->recovery_lock);
	return ret;
}
static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
		uint32_t max_length)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int tmp_threshold = amdgpu_bad_page_threshold;
	u64 val;

	/*
	 * Justification of the bad_page_cnt_threshold value in the ras
	 * structure.
	 *
	 * Generally, -1 <= amdgpu_bad_page_threshold <= max record length
	 * in eeprom, which introduces two scenarios accordingly.
	 *
	 * Bad page retirement enablement:
	 * - If amdgpu_bad_page_threshold = -1,
	 *   bad_page_cnt_threshold = typical value by formula.
	 *
	 * - When the value from the user is 0 < amdgpu_bad_page_threshold <
	 *   max record length in eeprom, use it directly.
	 *
	 * Bad page retirement disablement:
	 * - If amdgpu_bad_page_threshold = 0, bad page retirement
	 *   functionality is disabled, and bad_page_cnt_threshold will
	 *   take no effect.
	 */
	if (tmp_threshold < -1)
		tmp_threshold = -1;
	else if (tmp_threshold > max_length)
		tmp_threshold = max_length;

	if (tmp_threshold == -1) {
		val = adev->gmc.mc_vram_size;
		do_div(val, RAS_BAD_PAGE_RATE);
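		/* e.g. 16 GiB of VRAM at one bad page per 100 MiB works
		 * out to a default threshold of ~163 retired pages
		 * (illustrative arithmetic, capped by max_length below).
		 */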
		con->bad_page_cnt_threshold = min(lower_32_bits(val),
						  max_length);
	} else {
		con->bad_page_cnt_threshold = tmp_threshold;
	}
}
/* called in gpu recovery/init */
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	uint64_t bp;
	struct amdgpu_bo *bo = NULL;
	int i, ret = 0;

	/* Do not reserve bad pages when amdgpu_bad_page_threshold == 0. */
	if (!con || !con->eh_data || (amdgpu_bad_page_threshold == 0))
		return 0;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data)
		goto out;

	/* reserve vram at the driver post stage. */
	for (i = data->last_reserved; i < data->count; i++) {
		bp = data->bps[i].retired_page;

		/* There are two cases of reserve error that should be ignored:
		 * 1) a ras bad page has been allocated (used by someone);
		 * 2) a ras bad page has been reserved (duplicate error injection
		 *    for one page);
		 */
		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
					       AMDGPU_GPU_PAGE_SIZE,
					       AMDGPU_GEM_DOMAIN_VRAM,
					       &bo, NULL))
			dev_warn(adev->dev,
				 "RAS WARN: reserve vram for retired page %llx fail\n",
				 bp);

		data->bps_bo[i] = bo;
		data->last_reserved = i + 1;
		bo = NULL;
	}

	/* continue to save bad pages to eeprom even if reserve_vram fails */
	ret = amdgpu_ras_save_bad_pages(adev);
out:
	mutex_unlock(&con->recovery_lock);
	return ret;
}
/* called when the driver unloads */
static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	struct amdgpu_bo *bo;
	int i;

	if (!con || !con->eh_data)
		return 0;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data)
		goto out;

	for (i = data->last_reserved - 1; i >= 0; i--) {
		bo = data->bps_bo[i];

		amdgpu_bo_free_kernel(&bo, NULL, NULL);

		data->bps_bo[i] = bo;
		data->last_reserved = i;
	}
out:
	mutex_unlock(&con->recovery_lock);
	return 0;
}
int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data **data;
	uint32_t max_eeprom_records_len = 0;
	bool exc_err_limit = false;
	int ret;

	if (con)
		data = &con->eh_data;
	else
		goto out;

	*data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
	if (!*data) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_init(&con->recovery_lock);
	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
	atomic_set(&con->in_recovery, 0);
	con->adev = adev;

	max_eeprom_records_len = amdgpu_ras_eeprom_get_record_max_length();
	amdgpu_ras_validate_threshold(adev, max_eeprom_records_len);

	ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
	/*
	 * This call fails when exc_err_limit is true or
	 * ret != 0.
	 */
	if (exc_err_limit || ret)
		goto free;

	if (con->eeprom_control.num_recs) {
		ret = amdgpu_ras_load_bad_pages(adev);
		if (ret)
			goto free;
		ret = amdgpu_ras_reserve_bad_pages(adev);
		if (ret)
			goto release;
	}

	return 0;

release:
	amdgpu_ras_release_bad_pages(adev);
free:
	kfree((*data)->bps);
	kfree((*data)->bps_bo);
	kfree(*data);
	con->eh_data = NULL;
out:
	dev_warn(adev->dev, "Failed to initialize ras recovery!\n");

	/*
	 * Except for the error-threshold-exceeded case, other failures in
	 * this function do not fail amdgpu driver init.
	 */
	if (!exc_err_limit)
		ret = 0;
	else
		ret = -EINVAL;

	return ret;
}
static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data = con->eh_data;

	/* recovery_init failed to init it, fini is useless */
	if (!data)
		return 0;

	cancel_work_sync(&con->recovery_work);
	amdgpu_ras_release_bad_pages(adev);

	mutex_lock(&con->recovery_lock);
	con->eh_data = NULL;
	kfree(data->bps);
	kfree(data->bps_bo);
	kfree(data);
	mutex_unlock(&con->recovery_lock);

	return 0;
}
/* recovery end */
/* return 0 if ras will reset the gpu and repost. */
int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
		unsigned int block)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!ras)
		return -EINVAL;

	ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET;
	return 0;
}
/*
 * check the hardware's ras ability, which will be saved in hw_supported.
 * if the hardware does not support ras, we can skip some ras initialization
 * and forbid some ras operations from IPs.
 * if software itself, say a boot parameter, limits the ras ability, we still
 * need to allow IPs to do some limited operations, like disable. In such a
 * case, we have to initialize ras as normal, but need to check whether the
 * operation is allowed or not in each function.
 */
static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
		uint32_t *hw_supported, uint32_t *supported)
{
	*hw_supported = 0;
	*supported = 0;

	if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
	    (adev->asic_type != CHIP_VEGA20 &&
	     adev->asic_type != CHIP_ARCTURUS &&
	     adev->asic_type != CHIP_SIENNA_CICHLID))
		return;

	if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
		dev_info(adev->dev, "HBM ECC is active.\n");
		*hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
				1 << AMDGPU_RAS_BLOCK__DF);
	} else
		dev_info(adev->dev, "HBM ECC is not present.\n");

	if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
		dev_info(adev->dev, "SRAM ECC is active.\n");
		*hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
				1 << AMDGPU_RAS_BLOCK__DF);
	} else
		dev_info(adev->dev, "SRAM ECC is not present.\n");

	/* hw_supported needs to be aligned with the RAS block mask. */
	*hw_supported &= AMDGPU_RAS_BLOCK_MASK;

	*supported = amdgpu_ras_enable == 0 ?
			0 : *hw_supported & amdgpu_ras_mask;
}
int amdgpu_ras_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int r;

	if (con)
		return 0;

	con = kmalloc(sizeof(struct amdgpu_ras) +
			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT,
			GFP_KERNEL|__GFP_ZERO);
	if (!con)
		return -ENOMEM;

	con->objs = (struct ras_manager *)(con + 1);

	amdgpu_ras_set_context(adev, con);

	amdgpu_ras_check_supported(adev, &con->hw_supported,
			&con->supported);
	if (!con->hw_supported) {
		r = 0;
		goto release_con;
	}

	INIT_LIST_HEAD(&con->head);
	/* Might need to get this flag from vbios. */
	con->flags = RAS_DEFAULT_FLAGS;

	if (adev->nbio.funcs->init_ras_controller_interrupt) {
		r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
		if (r)
			goto release_con;
	}

	if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
		r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
		if (r)
			goto release_con;
	}

	if (amdgpu_ras_fs_init(adev)) {
		r = -EINVAL;
		goto release_con;
	}

	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
			"hardware ability[%x] ras_mask[%x]\n",
			con->hw_supported, con->supported);
	return 0;
release_con:
	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

	return r;
}
/* helper function to handle common stuff in the ip late init phase */
int amdgpu_ras_late_init(struct amdgpu_device *adev,
			 struct ras_common_if *ras_block,
			 struct ras_fs_if *fs_info,
			 struct ras_ih_if *ih_info)
{
	int r;

	/* disable the RAS feature per IP block if it is not supported */
	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
		return 0;
	}

	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
	if (r) {
		if (r == -EAGAIN) {
			/* request a gpu reset. will run again */
			amdgpu_ras_request_reset_on_boot(adev,
					ras_block->block);
			return 0;
		} else if (adev->in_suspend || adev->in_gpu_reset) {
			/* in the resume phase, if we fail to enable ras,
			 * clean up all ras fs nodes, and disable ras */
			goto cleanup;
		} else
			return r;
	}

	/* in the resume phase, no need to create ras fs nodes */
	if (adev->in_suspend || adev->in_gpu_reset)
		return 0;

	if (ih_info->cb) {
		r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
		if (r)
			goto interrupt;
	}

	r = amdgpu_ras_sysfs_create(adev, fs_info);
	if (r)
		goto sysfs;

	return 0;
cleanup:
	amdgpu_ras_sysfs_remove(adev, ras_block);
sysfs:
	if (ih_info->cb)
		amdgpu_ras_interrupt_remove_handler(adev, ih_info);
interrupt:
	amdgpu_ras_feature_enable(adev, ras_block, 0);
	return r;
}
/* helper function to remove the ras fs node and interrupt handler */
void amdgpu_ras_late_fini(struct amdgpu_device *adev,
			  struct ras_common_if *ras_block,
			  struct ras_ih_if *ih_info)
{
	if (!ras_block || !ih_info)
		return;

	amdgpu_ras_sysfs_remove(adev, ras_block);
	if (ih_info->cb)
		amdgpu_ras_interrupt_remove_handler(adev, ih_info);
	amdgpu_ras_feature_enable(adev, ras_block, 0);
}
/* do some init work after IP late init as a dependency.
 * it runs in the resume/gpu reset/booting up cases.
 */
void amdgpu_ras_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	if (!con)
		return;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		/* Set up all other IPs which are not implemented. There is a
		 * tricky thing that an IP's actual ras error type should be
		 * MULTI_UNCORRECTABLE, but as the driver does not handle it,
		 * ERROR_NONE makes sense anyway.
		 */
		amdgpu_ras_enable_all_features(adev, 1);

		/* We enable ras on all hw_supported blocks, but the boot
		 * parameter might disable some of them and one or more IPs
		 * may not be implemented yet. So we disable them on their
		 * behalf.
		 */
		list_for_each_entry_safe(obj, tmp, &con->head, node) {
			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
				amdgpu_ras_feature_enable(adev, &obj->head, 0);
				/* there should not be any reference. */
				WARN_ON(alive_obj(obj));
			}
		}
	}

	if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) {
		con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET;
		/* set up the ras obj state as disabled.
		 * for the init_by_vbios case.
		 * if we want to enable ras, just enable it in a normal way.
		 * If we want to disable it, we need to set up the ras obj as
		 * enabled, then issue another TA disable cmd.
		 * See feature_enable_on_boot
		 */
		amdgpu_ras_disable_all_features(adev, 1);
		amdgpu_ras_reset_gpu(adev);
	}
}
void amdgpu_ras_suspend(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return;

	amdgpu_ras_disable_all_features(adev, 0);
	/* Make sure all ras objects are disabled. */
	if (con->features)
		amdgpu_ras_disable_all_features(adev, 1);
}
/* do some fini work before IP fini as a dependency */
int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return 0;

	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
	amdgpu_ras_disable_all_features(adev, 0);
	amdgpu_ras_recovery_fini(adev);
	return 0;
}

int amdgpu_ras_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return 0;

	amdgpu_ras_fs_fini(adev);
	amdgpu_ras_interrupt_remove_all(adev);

	WARN(con->features, "Feature mask is not cleared");

	if (con->features)
		amdgpu_ras_disable_all_features(adev, 1);

	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

	return 0;
}
void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
	uint32_t hw_supported, supported;

	amdgpu_ras_check_supported(adev, &hw_supported, &supported);
	if (!hw_supported)
		return;

	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
		dev_info(adev->dev, "uncorrectable hardware error "
			"(ERREVENT_ATHUB_INTERRUPT) detected!\n");

		amdgpu_ras_reset_gpu(adev);
	}
}
bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20 &&
	    adev->pm.fw_version <= 0x283400) {
		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
				amdgpu_ras_intr_triggered();
	}

	return false;
}

bool amdgpu_ras_check_err_threshold(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	bool exc_err_limit = false;

	if (con && (amdgpu_bad_page_threshold != 0))
		amdgpu_ras_eeprom_check_err_threshold(&con->eeprom_control,
						&exc_err_limit);

	/*
	 * We are only interested in the variable exc_err_limit,
	 * as it says whether the GPU is in a bad state or not.
	 */
	return exc_err_limit;
}