 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>

#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
const char *ras_error_string[] = {
	"multi_uncorrectable",

const char *ras_block_string[] = {

#define ras_err_str(i) (ras_error_string[ffs(i)])
#define ras_block_str(i) (ras_block_string[i])

#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS		1
#define AMDGPU_RAS_FLAG_INIT_NEED_RESET		2
#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {

	if (amdgpu_ras_error_query(obj->adev, &info))

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",

	s = min_t(u64, s, size);

	if (copy_to_user(buf, &val[*pos], s))

static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.llseek = default_llseek
static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		if (strcmp(name, ras_block_str(i)) == 0)

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
	ssize_t s = min_t(u64, 64, size);

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))

	if (sscanf(str, "disable %32s", block_name) == 1)
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
	else if (sscanf(str, "reboot %32s", block_name) == 1)
	else if (str[0] && str[1] && str[2] && str[3])
		/* ASCII string, but the command did not match. */

	if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))

	data->head.block = block_id;
	/* only ue and ce errors are supported */
	if (!memcmp("ue", err, 2))
		data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	else if (!memcmp("ce", err, 2))
		data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;

	if (sscanf(str, "%*s %*s %*s %u %llu %llu",
			&sub_block, &address, &value) != 3)
		if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
				&sub_block, &address, &value) != 3)

	data->head.sub_block_index = sub_block;
	data->inject.address = address;
	data->inject.value = value;

	if (size < sizeof(*data))

	if (copy_from_user(data, buf, sizeof(*data)))
static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head);

 * DOC: AMDGPU RAS debugfs control interface
 *
 * It accepts a struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members: block, type, sub_block_index, name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, say, GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has two more members than head: address and value.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * Second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * Copy the struct ras_debug_if in your code and initialize it,
 * then write the struct to the control node.
 *
 * .. code-block:: bash
 *
 *	echo op block [error [sub_block address value]] > .../ras/ras_ctrl
 *
 *	op: disable, enable, inject
 *		disable: only block is needed
 *		enable: block and error are needed
 *		inject: error, address, value are needed
 *	block: umc, sdma, gfx, .........
 *		see ras_block_string[] for details
 *		ue: multi_uncorrectable
 *		ce: single_correctable
 *		sub block index, pass 0 if there is no sub block
 *
 * Here are some examples of bash commands:
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result?
 *
 * For disable/enable, please check the ras features at
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * For inject, please check the corresponding err count at
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * Operation is only allowed on blocks which are supported.
 * Please check the ras mask at /sys/module/amdgpu/parameters/ras_mask
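 *
 * For programmatic use, a minimal user-space sketch could look like the
 * following. This is illustrative only: the actual struct ras_debug_if
 * layout and the block/type enum values must be taken from amdgpu_ras.h,
 * and ::op takes one of the values documented above (2 == inject here).
 *
 * .. code-block:: c
 *
 *	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *	struct ras_debug_if data;
 *
 *	memset(&data, 0, sizeof(data));
 *	data.op = 2;
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *	data.head.sub_block_index = 0;
 *	data.inject.address = 0;
 *	data.inject.value = 0;
 *	write(fd, &data, sizeof(data));
 *	close(fd);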
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
		size_t size, loff_t *pos)
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);

	if (!amdgpu_ras_is_supported(adev, data.head.block))

		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		if ((data.inject.address >= adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {

		/* data.inject.address is an offset, not an absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);

		amdgpu_ras_get_context(adev)->reboot = true;
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages containing ECC errors detected in vram. This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset the EEPROM table to 0 entries.

static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
		size_t size, loff_t *pos)
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;

	ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control);

	return ret == 1 ? size : -EIO;

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek

 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * .. code-block:: bash
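 *
 *	ue: 0
 *	ce: 1
 *
 * The two lines above are only example values; they correspond to the
 * "%s: %lu" pairs written by amdgpu_ras_sysfs_read() below.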
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
		struct device_attribute *attr, char *buf)
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {

	if (amdgpu_ras_error_query(obj->adev, &info))

	return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n",
			"ce", info.ce_count);

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
	if (obj && --obj->use == 0)
		list_del(&obj->node);
	if (obj && obj->use < 0) {
		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", obj->head.name);

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)

	obj = &con->objs[head->block];
	/* already exists, return the obj? */

	list_add(&obj->node, &con->head);

/* return an obj equal to head, or the first when head is NULL */
static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)

	obj = &con->objs[head->block];

	if (alive_obj(obj)) {
		WARN_ON(head->block != obj->head.block);

	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
		if (alive_obj(obj)) {
			WARN_ON(i != obj->head.block);
/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
		struct ras_common_if *head)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->hw_supported & BIT(head->block);

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
		struct ras_common_if *head)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);

 * if obj is not created, then create one.
 * set feature enable flag.
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If hardware does not support ras, then do not create obj.
	 * But if hardware does support ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs
	 * to do the corresponding initialization.
	 * IPs check con->supported to see if they need to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
	if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))

			obj = amdgpu_ras_create_obj(adev, head);
			/* In case we create the obj somewhere else */
		con->features |= BIT(head->block);
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input info;

		info.disable_features = (struct ta_ras_disable_features_input) {
			.block_id = amdgpu_ras_block_to_ta(head->block),
			.error_type = amdgpu_ras_error_to_ta(head->type),
		info.enable_features = (struct ta_ras_enable_features_input) {
			.block_id = amdgpu_ras_block_to_ta(head->block),
			.error_type = amdgpu_ras_error_to_ta(head->type),

	/* Do not enable if it is not allowed. */
	WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
	/* Are we already in the state we are going to set? */
	if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))

	ret = psp_ras_enable_features(&adev->psp, &info, enable);
		DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
			enable ? "enable":"disable",
			ras_block_str(head->block),
		if (ret == TA_RAS_STATUS__RESET_NEEDED)

	__amdgpu_ras_feature_enable(adev, head, enable);
/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
			/* There is no harm in issuing a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing.
			 * But sometimes it requests the driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With an old ras TA, we might fail to enable ras.
			 * Log it and just set up the object.
			 * TODO: remove this WA in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
					DRM_INFO("RAS INFO: %s setup object\n",
						ras_block_str(head->block));
			/* setup the object then issue a ras TA disable cmd.*/
			ret = __amdgpu_ras_feature_enable(adev, head, 1);

			ret = amdgpu_ras_feature_enable(adev, head, 0);
		ret = amdgpu_ras_feature_enable(adev, head, enable);
static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		 * aka just release the obj and corresponding flags
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))

	return con->features;

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;

	const enum amdgpu_ras_error_type default_ras_type =
		AMDGPU_RAS_ERROR__NONE;

	for (i = 0; i < ras_block_count; i++) {
		struct ras_common_if head = {
			.type = default_ras_type,
			.sub_block_index = 0,
		strcpy(head.name, ras_block_str(i));

			 * bypass psp. vbios enables ras for us,
			 * so just create the obj
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
			if (amdgpu_ras_feature_enable(adev, &head, 1))

	return con->features;

/* feature ctl end */
/* query/inject/cure begin */
int amdgpu_ras_error_query(struct amdgpu_device *adev,
		struct ras_query_if *info)
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data = {0, 0, 0, NULL};

	switch (info->head.block) {
	case AMDGPU_RAS_BLOCK__UMC:
		if (adev->umc.funcs->query_ras_error_count)
			adev->umc.funcs->query_ras_error_count(adev, &err_data);
		/* umc query_ras_error_address is also responsible for clearing
		if (adev->umc.funcs->query_ras_error_address)
			adev->umc.funcs->query_ras_error_address(adev, &err_data);
	case AMDGPU_RAS_BLOCK__GFX:
		if (adev->gfx.funcs->query_ras_error_count)
			adev->gfx.funcs->query_ras_error_count(adev, &err_data);
	case AMDGPU_RAS_BLOCK__MMHUB:
		if (adev->mmhub.funcs->query_ras_error_count)
			adev->mmhub.funcs->query_ras_error_count(adev, &err_data);
	case AMDGPU_RAS_BLOCK__PCIE_BIF:
		if (adev->nbio.funcs->query_ras_error_count)
			adev->nbio.funcs->query_ras_error_count(adev, &err_data);

	obj->err_data.ue_count += err_data.ue_count;
	obj->err_data.ce_count += err_data.ce_count;

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;

	if (err_data.ce_count) {
		dev_info(adev->dev, "%ld correctable errors detected in %s block\n",
			 obj->err_data.ce_count, ras_block_str(info->head.block));
	if (err_data.ue_count) {
		dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n",
			 obj->err_data.ue_count, ras_block_str(info->head.block));
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,

	switch (info->head.block) {
	case AMDGPU_RAS_BLOCK__GFX:
		if (adev->gfx.funcs->ras_error_inject)
			ret = adev->gfx.funcs->ras_error_inject(adev, info);
	case AMDGPU_RAS_BLOCK__UMC:
	case AMDGPU_RAS_BLOCK__MMHUB:
	case AMDGPU_RAS_BLOCK__XGMI_WAFL:
	case AMDGPU_RAS_BLOCK__PCIE_BIF:
		ret = psp_ras_trigger_error(&adev->psp, &block_info);
		DRM_INFO("%s error injection is not supported yet\n",
			 ras_block_str(info->head.block));

		DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
			  ras_block_str(info->head.block),

int amdgpu_ras_error_cure(struct amdgpu_device *adev,
		struct ras_cure_if *info)
	/* psp fw has no cure interface for now. */

/* get the total error counts on all IPs */
unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	struct ras_err_data data = {0, 0};

	list_for_each_entry(obj, &con->head, node) {
		struct ras_query_if info = {

		if (amdgpu_ras_error_query(adev, &info))

		data.ce_count += info.ce_count;
		data.ue_count += info.ue_count;

	return is_ce ? data.ce_count : data.ue_count;

/* query/inject/cure end */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)

 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page is reserved and not able to be used.
 *
 * P: pending for reserve, this gpu page is marked as bad and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve. This gpu page can't be reserved for some reason.
 *
 * .. code-block:: bash
 *
 *	0x00000001 : 0x00001000 : R
 *	0x00000002 : 0x00001000 : P

static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t ppos, size_t count)
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, badpages_attr);
	struct amdgpu_device *adev = con->adev;
	const unsigned int element_size =
		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
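	/* ppos and count are byte offsets into this virtual file; each bad
	 * page is reported as one fixed-size record of element_size bytes,
	 * so "start" is the first record that begins at or after ppos and
	 * "end" is the record holding the last requested byte.
	 */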
	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
	unsigned int end = div64_ul(ppos + count - 1, element_size);
	struct ras_badpage *bps = NULL;
	unsigned int bps_count = 0;

	memset(buf, 0, count);

	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))

	for (; start < end && start < bps_count; start++)
		s += scnprintf(&buf[s], element_size + 1,
				"0x%08x : 0x%08x : %1s\n",
				amdgpu_ras_badpage_flags_str(bps[start].flags));

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
		struct device_attribute *attr, char *buf)
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, features_attr);

	return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);

static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
	struct bin_attribute *bin_attrs[] = {
	struct attribute_group group = {
		.bin_attrs = bin_attrs,

	con->features_attr = (struct device_attribute) {
		.show = amdgpu_ras_sysfs_features_read,

	con->badpages_attr = (struct bin_attribute) {
		.name = "gpu_vram_bad_pages",
		.read = amdgpu_ras_sysfs_badpages_read,

	sysfs_attr_init(attrs[0]);
	sysfs_bin_attr_init(bin_attrs[0]);

	return sysfs_create_group(&adev->dev->kobj, &group);
static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
	struct bin_attribute *bin_attrs[] = {
	struct attribute_group group = {
		.bin_attrs = bin_attrs,

	sysfs_remove_group(&adev->dev->kobj, &group);

int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
		struct ras_fs_if *head)
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

	if (!obj || obj->attr_inuse)

	memcpy(obj->fs_data.sysfs_name,
		sizeof(obj->fs_data.sysfs_name));

	obj->sysfs_attr = (struct device_attribute){
		.name = obj->fs_data.sysfs_name,
		.show = amdgpu_ras_sysfs_read,
	sysfs_attr_init(&obj->sysfs_attr.attr);

	if (sysfs_add_file_to_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->attr_inuse)

	sysfs_remove_file_from_group(&adev->dev->kobj,
			&obj->sysfs_attr.attr,

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_sysfs_remove(adev, &obj->head);

	amdgpu_ras_sysfs_remove_feature_node(adev);
static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct drm_minor *minor = adev->ddev->primary;

	con->dir = debugfs_create_dir("ras", minor->debugfs_root);
	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
				adev, &amdgpu_ras_debugfs_ctrl_ops);
	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir,
				adev, &amdgpu_ras_debugfs_eeprom_ops);

void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
		struct ras_fs_if *head)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

	if (!obj || obj->ent)

	memcpy(obj->fs_data.debugfs_name,
		sizeof(obj->fs_data.debugfs_name));

	obj->ent = debugfs_create_file(obj->fs_data.debugfs_name,
				       S_IWUGO | S_IRUGO, con->dir, obj,
				       &amdgpu_ras_debugfs_ops);

void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->ent)

	debugfs_remove(obj->ent);

static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_debugfs_remove(adev, &obj->head);

	debugfs_remove_recursive(con->dir);

static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
	amdgpu_ras_sysfs_create_feature_node(adev);
	amdgpu_ras_debugfs_create_ctrl_node(adev);

static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
	amdgpu_ras_debugfs_remove_all(adev);
	amdgpu_ras_sysfs_remove_all(adev);
static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
	struct ras_ih_data *data = &obj->ih_data;
	struct amdgpu_iv_entry entry;
	struct ras_err_data err_data = {0, 0, 0, NULL};

	while (data->rptr != data->wptr) {
		memcpy(&entry, &data->ring[data->rptr],
				data->element_size);

		data->rptr = (data->aligned_element_size +
				data->rptr) % data->ring_size;

		/* Let the IP handle its data, maybe we need to get the output
		 * from the callback to update the error type/count, etc.
		 */
			ret = data->cb(obj->adev, &err_data, &entry);
			/* A ue will trigger an interrupt, and in that case
			 * we need to do a reset to recover the whole system.
			 * But leave the IP to do that recovery, here we just dispatch
			if (ret == AMDGPU_RAS_SUCCESS) {
				/* these counts could be left as 0 if
				 * some blocks do not count the error number
				 */
				obj->err_data.ue_count += err_data.ue_count;
				obj->err_data.ce_count += err_data.ce_count;

static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
	struct ras_ih_data *data =
		container_of(work, struct ras_ih_data, ih_work);
	struct ras_manager *obj =
		container_of(data, struct ras_manager, ih_data);

	amdgpu_ras_interrupt_handler(obj);
int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
		struct ras_dispatch_if *info)
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data = &obj->ih_data;

	if (data->inuse == 0)

	/* Might be overflow... */
	memcpy(&data->ring[data->wptr], info->entry,
			data->element_size);

	data->wptr = (data->aligned_element_size +
			data->wptr) % data->ring_size;

	schedule_work(&data->ih_work);

int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
		struct ras_ih_if *info)
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data;

	data = &obj->ih_data;
	if (data->inuse == 0)

	cancel_work_sync(&data->ih_work);

	memset(data, 0, sizeof(*data));
int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
		struct ras_ih_if *info)
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data;

		/* in case we register the IH before enabling the ras feature */
		obj = amdgpu_ras_create_obj(adev, &info->head);

	data = &obj->ih_data;
	/* add the callback, etc. */
	*data = (struct ras_ih_data) {
		.element_size = sizeof(struct amdgpu_iv_entry),

	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);

	data->aligned_element_size = ALIGN(data->element_size, 8);
	/* the ring can store 64 iv entries. */
	data->ring_size = 64 * data->aligned_element_size;
	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		struct ras_ih_if info = {

		amdgpu_ras_interrupt_remove_handler(adev, &info);

/* recovery begin */

/* return 0 on success.
 * the caller needs to free bps.
 */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;

	if (!con || !con->eh_data || !bps || !count)
	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data || data->count == 0) {

	*bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);

	for (; i < data->count; i++) {
		(*bps)[i] = (struct ras_badpage){
			.bp = data->bps[i].retired_page,
			.size = AMDGPU_GPU_PAGE_SIZE,

		if (data->last_reserved <= i)
			(*bps)[i].flags = 1;
		else if (data->bps_bo[i] == NULL)
			(*bps)[i].flags = 2;

	*count = data->count;
	mutex_unlock(&con->recovery_lock);

static void amdgpu_ras_do_recovery(struct work_struct *work)
	struct amdgpu_ras *ras =
		container_of(work, struct amdgpu_ras, recovery_work);

	amdgpu_device_gpu_recover(ras->adev, 0);
	atomic_set(&ras->in_recovery, 0);
/* alloc/realloc bps array */
static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
		struct ras_err_handler_data *data, int pages)
	unsigned int old_space = data->count + data->space_left;
	unsigned int new_space = old_space + pages;
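	/* grow the arrays in chunks of 512 records so that repeated
	 * single-page additions do not force a reallocation every time
	 */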
	unsigned int align_space = ALIGN(new_space, 512);
	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
	struct amdgpu_bo **bps_bo =
			kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL);

	if (!bps || !bps_bo) {

		memcpy(bps, data->bps,
				data->count * sizeof(*data->bps));

		memcpy(bps_bo, data->bps_bo,
				data->count * sizeof(*data->bps_bo));
		kfree(data->bps_bo);

	data->bps_bo = bps_bo;
	data->space_left += align_space - old_space;
/* it deals with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
		struct eeprom_table_record *bps, int pages)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;

	if (!con || !con->eh_data || !bps || pages <= 0)

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;

	if (data->space_left <= pages)
		if (amdgpu_ras_realloc_eh_data_space(adev, data, pages)) {

	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
	data->count += pages;
	data->space_left -= pages;

	mutex_unlock(&con->recovery_lock);

 * write error record array to eeprom, the function should be
 * protected by recovery_lock
static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	struct amdgpu_ras_eeprom_control *control;

	if (!con || !con->eh_data)

	control = &con->eeprom_control;
	data = con->eh_data;
	save_count = data->count - control->num_recs;
	/* only new entries are saved */
	if (amdgpu_ras_eeprom_process_recods(control,
			&data->bps[control->num_recs],
		DRM_ERROR("Failed to save EEPROM table data!");
 * read error record array in eeprom and reserve enough space for
 * storing new bad pages
static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
	struct amdgpu_ras_eeprom_control *control =
					&adev->psp.ras.ras->eeprom_control;
	struct eeprom_table_record *bps = NULL;

	/* no bad page record, skip eeprom access */
	if (!control->num_recs)

	bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);

	if (amdgpu_ras_eeprom_process_recods(control, bps, false,
		control->num_recs)) {
		DRM_ERROR("Failed to load EEPROM table records!");

	ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs);
/* called in gpu recovery/init */
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;

	struct amdgpu_bo *bo = NULL;

	if (!con || !con->eh_data)

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;

	/* reserve vram at driver post stage. */
	for (i = data->last_reserved; i < data->count; i++) {
		bp = data->bps[i].retired_page;

		/* There are two cases where a reserve error should be ignored:
		 * 1) a ras bad page has been allocated (used by someone);
		 * 2) a ras bad page has been reserved (duplicate error injection
		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
					       AMDGPU_GPU_PAGE_SIZE,
					       AMDGPU_GEM_DOMAIN_VRAM,
			DRM_WARN("RAS WARN: reserve vram for retired page %llx failed\n", bp);

		data->bps_bo[i] = bo;
		data->last_reserved = i + 1;

	/* continue to save bad pages to eeprom even if reserve_vram fails */
	ret = amdgpu_ras_save_bad_pages(adev);

	mutex_unlock(&con->recovery_lock);

/* called when the driver unloads */
static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	struct amdgpu_bo *bo;

	if (!con || !con->eh_data)

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;

	for (i = data->last_reserved - 1; i >= 0; i--) {
		bo = data->bps_bo[i];

		amdgpu_bo_free_kernel(&bo, NULL, NULL);

		data->bps_bo[i] = bo;
		data->last_reserved = i;

	mutex_unlock(&con->recovery_lock);
int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data **data;

	data = &con->eh_data;

	*data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);

	mutex_init(&con->recovery_lock);
	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
	atomic_set(&con->in_recovery, 0);

	ret = amdgpu_ras_eeprom_init(&con->eeprom_control);

	if (con->eeprom_control.num_recs) {
		ret = amdgpu_ras_load_bad_pages(adev);

		ret = amdgpu_ras_reserve_bad_pages(adev);

	amdgpu_ras_release_bad_pages(adev);

	kfree((*data)->bps);
	kfree((*data)->bps_bo);

	con->eh_data = NULL;

	DRM_WARN("Failed to initialize ras recovery!\n");

static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data = con->eh_data;

	/* recovery_init failed to init it, fini is useless */

	cancel_work_sync(&con->recovery_work);
	amdgpu_ras_release_bad_pages(adev);

	mutex_lock(&con->recovery_lock);
	con->eh_data = NULL;

	kfree(data->bps_bo);

	mutex_unlock(&con->recovery_lock);
/* return 0 if ras will reset gpu and repost.*/
int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET;

 * check hardware's ras ability which will be saved in hw_supported.
 * If hardware does not support ras, we can skip some ras initialization and
 * forbid some ras operations from IPs.
 * If software itself, say a boot parameter, limits the ras ability, we still
 * need to allow IPs to do some limited operations, like disable. In such a
 * case, we have to initialize ras as normal, but need to check whether the
 * operation is allowed or not in each function.
static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
		uint32_t *hw_supported, uint32_t *supported)
	if (amdgpu_sriov_vf(adev) ||
	    adev->asic_type != CHIP_VEGA20)

	if (adev->is_atom_fw &&
	    (amdgpu_atomfirmware_mem_ecc_supported(adev) ||
	     amdgpu_atomfirmware_sram_ecc_supported(adev)))
		*hw_supported = AMDGPU_RAS_BLOCK_MASK;

	*supported = amdgpu_ras_enable == 0 ?
		0 : *hw_supported & amdgpu_ras_mask;
int amdgpu_ras_init(struct amdgpu_device *adev)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	con = kmalloc(sizeof(struct amdgpu_ras) +
			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT,
			GFP_KERNEL|__GFP_ZERO);

	con->objs = (struct ras_manager *)(con + 1);

	amdgpu_ras_set_context(adev, con);

	amdgpu_ras_check_supported(adev, &con->hw_supported,
	if (!con->hw_supported) {
		amdgpu_ras_set_context(adev, NULL);

	INIT_LIST_HEAD(&con->head);
	/* Might need to get this flag from vbios. */
	con->flags = RAS_DEFAULT_FLAGS;

	if (adev->nbio.funcs->init_ras_controller_interrupt) {
		r = adev->nbio.funcs->init_ras_controller_interrupt(adev);

	if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
		r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);

	amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;

	if (amdgpu_ras_fs_init(adev))

	DRM_INFO("RAS INFO: ras initialized successfully, "
		 "hardware ability[%x] ras_mask[%x]\n",
		 con->hw_supported, con->supported);

	amdgpu_ras_set_context(adev, NULL);
/* helper function to handle common stuff in ip late init phase */
int amdgpu_ras_late_init(struct amdgpu_device *adev,
			 struct ras_common_if *ras_block,
			 struct ras_fs_if *fs_info,
			 struct ras_ih_if *ih_info)
	/* disable RAS feature per IP block if it is not supported */
	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);

	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
			/* request gpu reset. will run again */
			amdgpu_ras_request_reset_on_boot(adev,
		} else if (adev->in_suspend || adev->in_gpu_reset) {
			/* in resume phase, if we fail to enable ras,
			 * clean up all ras fs nodes, and disable ras */

	/* in resume phase, no need to create ras fs node */
	if (adev->in_suspend || adev->in_gpu_reset)

		r = amdgpu_ras_interrupt_add_handler(adev, ih_info);

	amdgpu_ras_debugfs_create(adev, fs_info);

	r = amdgpu_ras_sysfs_create(adev, fs_info);

	amdgpu_ras_sysfs_remove(adev, ras_block);
	amdgpu_ras_debugfs_remove(adev, ras_block);
	amdgpu_ras_interrupt_remove_handler(adev, ih_info);
	amdgpu_ras_feature_enable(adev, ras_block, 0);

/* helper function to remove ras fs node and interrupt handler */
void amdgpu_ras_late_fini(struct amdgpu_device *adev,
			  struct ras_common_if *ras_block,
			  struct ras_ih_if *ih_info)
	if (!ras_block || !ih_info)

	amdgpu_ras_sysfs_remove(adev, ras_block);
	amdgpu_ras_debugfs_remove(adev, ras_block);

	amdgpu_ras_interrupt_remove_handler(adev, ih_info);
	amdgpu_ras_feature_enable(adev, ras_block, 0);
/* do some init work after IP late init as a dependency.
 * It runs in resume/gpu reset/boot-up cases.
 */
void amdgpu_ras_resume(struct amdgpu_device *adev)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		/* Set up all other IPs which are not implemented. There is a
		 * tricky thing that the IP's actual ras error type should be
		 * MULTI_UNCORRECTABLE, but as the driver does not handle it,
		 * ERROR_NONE makes sense anyway.
		 */
		amdgpu_ras_enable_all_features(adev, 1);

		/* We enable ras on all hw_supported blocks, but the boot
		 * parameter might disable some of them and one or more IPs
		 * may not be implemented yet. So we disable them on their behalf.
		 */
		list_for_each_entry_safe(obj, tmp, &con->head, node) {
			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
				amdgpu_ras_feature_enable(adev, &obj->head, 0);
				/* there should not be any reference. */
				WARN_ON(alive_obj(obj));

	if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) {
		con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET;
		/* set up the ras obj state as disabled,
		 * for the init_by_vbios case.
		 * If we want to enable ras, just enable it in a normal way.
		 * If we want to disable it, we need to set up the ras obj as
		 * enabled, then issue another TA disable cmd.
		 * See feature_enable_on_boot
		 */
		amdgpu_ras_disable_all_features(adev, 1);
		amdgpu_ras_reset_gpu(adev, 0);

void amdgpu_ras_suspend(struct amdgpu_device *adev)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	amdgpu_ras_disable_all_features(adev, 0);
	/* Make sure all ras objects are disabled. */
		amdgpu_ras_disable_all_features(adev, 1);
/* do some fini work before IP fini as a dependency */
int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
	amdgpu_ras_disable_all_features(adev, 0);
	amdgpu_ras_recovery_fini(adev);

int amdgpu_ras_fini(struct amdgpu_device *adev)
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	amdgpu_ras_fs_fini(adev);
	amdgpu_ras_interrupt_remove_all(adev);

	WARN(con->features, "Feature mask is not cleared");

		amdgpu_ras_disable_all_features(adev, 1);

	amdgpu_ras_set_context(adev, NULL);

void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
		DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");

		amdgpu_ras_reset_gpu(adev, false);