/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"

struct ras_ih_data {
	/* interrupt bottom half */
	struct work_struct ih_work;
	int inuse;
	/* IP callback */
	ras_ih_cb cb;
	/* ring buffer of IV entries */
	unsigned char *ring;
	unsigned int ring_size;
	unsigned int element_size;
	unsigned int aligned_element_size;
	unsigned int rptr;
	unsigned int wptr;
};
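
/*
 * The ring above is a single-producer/single-consumer buffer: the dispatch
 * path advances wptr and the ih_work bottom half advances rptr, both in
 * aligned_element_size steps modulo ring_size, so rptr == wptr means empty.
 */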

struct ras_fs_data {
	char sysfs_name[32];
	char debugfs_name[32];
};

struct ras_err_data {
	unsigned long ue_count;
	unsigned long ce_count;
};

struct ras_err_handler_data {
	/* points to the bad pages array */
	struct {
		unsigned long bp;
		struct amdgpu_bo *bo;
	} *bps;
	/* the number of entries in the array */
	int count;
	/* the number of free slots for new entries */
	int space_left;
	/* last reserved entry's index + 1 */
	int last_reserved;
};

struct ras_manager {
	struct ras_common_if head;
	/* reference count */
	int use;
	/* ras block link */
	struct list_head node;
	/* the device */
	struct amdgpu_device *adev;
	/* debugfs */
	struct dentry *ent;
	/* sysfs */
	struct device_attribute sysfs_attr;
	int attr_inuse;

	/* fs node name */
	struct ras_fs_data fs_data;

	/* IH data */
	struct ras_ih_data ih_data;

	struct ras_err_data err_data;
};

const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
};

#define ras_err_str(i) (ras_error_string[ffs(i)])
#define ras_block_str(i) (ras_block_string[i])
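
/*
 * The error types are one-hot encoded and ffs() is 1-based, so e.g.
 * AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE (0x4) indexes entry 3,
 * "multi_uncorrectable", in ras_error_string[] above.
 */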

enum amdgpu_ras_flags {
	AMDGPU_RAS_FLAG_INIT_BY_VBIOS = 1,
};
#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

static void amdgpu_ras_self_test(struct amdgpu_device *adev)
{
	/* TODO */
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
		size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_error_query(obj->adev, &info))
		return -EINVAL;

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);
	if (*pos >= s)
		return 0;

	s -= *pos;
	s = min_t(u64, s, size);

	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}

static ssize_t amdgpu_ras_debugfs_write(struct file *f, const char __user *buf,
		size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_inject_if info = {
		.head = obj->head,
	};
	char val[64];
	char *str = val;
	/* leave room for a NUL terminator so the sscanf() below is safe */
	ssize_t s = min_t(u64, sizeof(val) - 1, size);

	memset(val, 0, sizeof(val));

	if (*pos)
		return -EINVAL;

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	/* only care about ue/ce for now. */
	if (memcmp(str, "ue", 2) == 0) {
		info.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		str += 2;
	} else if (memcmp(str, "ce", 2) == 0) {
		info.head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		str += 2;
	}

	if (sscanf(str, "0x%llx 0x%llx", &info.address, &info.value) != 2) {
		if (sscanf(str, "%llu %llu", &info.address, &info.value) != 2)
			return -EINVAL;
	}

	*pos = s;

	if (amdgpu_ras_error_inject(obj->adev, &info))
		return -EINVAL;

	return size;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.write = amdgpu_ras_debugfs_write,
	.llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_str(i)) == 0)
			return 0;
	}
	return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9] = "ue";
	int op = -1;
	int block_id;
	u64 address, value;

	if (*pos)
		return -EINVAL;
	*pos = size;

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but no command matched */
		return -EINVAL;

	if (op != -1) {
		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		data->head.type = memcmp("ue", err, 2) == 0 ?
			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE :
			AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s %llu %llu",
						&address, &value) != 2)
				if (sscanf(str, "%*s %*s %*s 0x%llx 0x%llx",
							&address, &value) != 2)
					return -EINVAL;
			data->inject.address = address;
			data->inject.value = value;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}
/*
 * DOC: ras debugfs control interface
 *
 * It accepts struct ras_debug_if, which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members: block, type, sub_block_index, name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have sub-components, e.g. GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has two more members than head: address and value.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * Second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 * 0: disable RAS on the block. Takes ::head as its data.
 * 1: enable RAS on the block. Takes ::head as its data.
 * 2: inject errors on the block. Takes ::inject as its data.
 *
 * How to use the interface?
 * programs:
 *	copy struct ras_debug_if into your code, initialize it,
 *	and write the struct to the control node.
 *
 * bash:
 *	echo op block [error [address value]] > .../ras/ras_ctrl
 *	op: disable, enable, inject
 *		disable: only block is needed
 *		enable: block and error are needed
 *		inject: error, address, value are needed
 *	block: umc, sdma, gfx, .........
 *		see ras_block_string[] for details
 *	error: ue, ce
 *		ue: multi_uncorrectable
 *		ce: single_correctable
 *
 * Here are some examples of bash commands:
 *	echo inject umc ue 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result?
 *
 * For disable/enable, please check the ras features at
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * For inject, please check the corresponding err count at
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * NOTE: operation is only allowed on blocks which are supported.
 * Please check the ras mask at /sys/module/amdgpu/parameters/ras_mask
 */
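
/*
 * A minimal user-space sketch of the "programs" path described in the DOC
 * comment above. The card index is illustrative and error handling is
 * omitted; struct ras_debug_if must match the layout used by this driver.
 *
 *	struct ras_debug_if data = { 0 };
 *	int fd;
 *
 *	data.op = 1;	op 1 enables RAS on the block, as listed above
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *
 *	fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *	write(fd, &data, sizeof(data));
 *	close(fd);
 */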
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
		size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return -EINVAL;

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return -EINVAL;

	return size;
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (amdgpu_ras_error_query(obj->adev, &info))
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);
}
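
/*
 * Reading a per-block node created by amdgpu_ras_sysfs_create() below,
 * e.g. /sys/class/drm/card0/device/ras/umc_err_count, yields output like:
 *
 *	ue: 0
 *	ce: 1
 */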

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && --obj->use == 0)
		list_del(&obj->node);
	if (obj && obj->use < 0)
		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", obj->head.name);
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	obj = &con->objs[head->block];
	/* already exists; do not create it again */
	if (alive_obj(obj))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}

/* return an obj equal to head, or the first when head is NULL */
static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		obj = &con->objs[head->block];

		if (alive_obj(obj)) {
			WARN_ON(head->block != obj->head.block);
			return obj;
		}
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj)) {
				WARN_ON(i != obj->head.block);
				return obj;
			}
		}
	}

	return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->hw_supported & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If the hardware does not support ras, then do not create the obj.
	 * But if the hardware does support ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs to do
	 * the corresponding initialization.
	 * An IP checks con->support to see if it needs to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;
	if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we create the obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input info;
	int ret;

	if (!con)
		return -EINVAL;

	if (!enable) {
		info.disable_features = (struct ta_ras_disable_features_input) {
			.block_id = head->block,
			.error_type = head->type,
		};
	} else {
		info.enable_features = (struct ta_ras_enable_features_input) {
			.block_id = head->block,
			.error_type = head->type,
		};
	}

	/* Do not enable if it is not allowed. */
	WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
	/* Are we already in the state we are going to set? */
	if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
		return 0;

	ret = psp_ras_enable_features(&adev->psp, &info, enable);
	if (ret) {
		DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
				enable ? "enable" : "disable",
				ras_block_str(head->block),
				ret);
		return -EINVAL;
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);

	return 0;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		/* bypass psp.
		 * aka just release the obj and corresponding flags
		 */
		if (bypass) {
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		}
	}

	return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
	int i;

	for (i = 0; i < ras_block_count; i++) {
		struct ras_common_if head = {
			.block = i,
			.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
			.sub_block_index = 0,
		};
		strcpy(head.name, ras_block_str(i));
		if (bypass) {
			/*
			 * bypass psp. the vbios has enabled ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */

/* query/inject/cure begin */
int amdgpu_ras_error_query(struct amdgpu_device *adev,
		struct ras_query_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);

	if (!obj)
		return -EINVAL;
	/* TODO: might need to read registers to get the count */

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;

	return 0;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = info->head.block,
		.inject_error_type = info->head.type,
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = 0;

	if (!obj)
		return -EINVAL;

	ret = psp_ras_trigger_error(&adev->psp, &block_info);
	if (ret)
		DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
				ras_block_str(info->head.block),
				ret);

	return ret;
}

int amdgpu_ras_error_cure(struct amdgpu_device *adev,
		struct ras_cure_if *info)
{
	/* psp fw has no cure interface for now. */
	return 0;
}

/* get the total error counts on all IPs */
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
		bool is_ce)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	struct ras_err_data data = {0, 0};

	if (!con)
		return -EINVAL;

	list_for_each_entry(obj, &con->head, node) {
		struct ras_query_if info = {
			.head = obj->head,
		};

		if (amdgpu_ras_error_query(adev, &info))
			return -EINVAL;

		data.ce_count += info.ce_count;
		data.ue_count += info.ue_count;
	}

	return is_ce ? data.ce_count : data.ue_count;
}
/* query/inject/cure end */

/* sysfs begin */

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, features_attr);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct ras_common_if head;
	int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
	int i;
	ssize_t s;
	struct ras_manager *obj;

	s = scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);

	for (i = 0; i < ras_block_count; i++) {
		head.block = i;

		if (amdgpu_ras_is_feature_enabled(adev, &head)) {
			obj = amdgpu_ras_find_obj(adev, &head);
			s += scnprintf(&buf[s], PAGE_SIZE - s,
					"%s: %s\n",
					ras_block_str(i),
					ras_err_str(obj->head.type));
		} else {
			s += scnprintf(&buf[s], PAGE_SIZE - s,
					"%s: disabled\n",
					ras_block_str(i));
		}
	}

	return s;
}

static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct attribute_group group = {
		.name = "ras",
		.attrs = attrs,
	};

	con->features_attr = (struct device_attribute) {
		.attr = {
			.name = "features",
			.mode = S_IRUGO,
		},
		.show = amdgpu_ras_sysfs_features_read,
	};
	sysfs_attr_init(attrs[0]);

	return sysfs_create_group(&adev->dev->kobj, &group);
}

static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct attribute_group group = {
		.name = "ras",
		.attrs = attrs,
	};

	sysfs_remove_group(&adev->dev->kobj, &group);

	return 0;
}

int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
		struct ras_fs_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

	if (!obj || obj->attr_inuse)
		return -EINVAL;

	get_obj(obj);

	memcpy(obj->fs_data.sysfs_name,
			head->sysfs_name,
			sizeof(obj->fs_data.sysfs_name));

	obj->sysfs_attr = (struct device_attribute){
		.attr = {
			.name = obj->fs_data.sysfs_name,
			.mode = S_IRUGO,
		},
		.show = amdgpu_ras_sysfs_read,
	};
	sysfs_attr_init(&obj->sysfs_attr.attr);

	if (sysfs_add_file_to_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				"ras")) {
		put_obj(obj);
		return -EINVAL;
	}

	obj->attr_inuse = 1;

	return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->attr_inuse)
		return -EINVAL;

	sysfs_remove_file_from_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				"ras");
	obj->attr_inuse = 0;
	put_obj(obj);

	return 0;
}

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_sysfs_remove(adev, &obj->head);
	}

	amdgpu_ras_sysfs_remove_feature_node(adev);

	return 0;
}
/* sysfs end */

/* debugfs begin */
static int amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *root = minor->debugfs_root, *dir;
	struct dentry *ent;

	dir = debugfs_create_dir("ras", root);
	if (IS_ERR(dir))
		return -EINVAL;

	con->dir = dir;

	ent = debugfs_create_file("ras_ctrl",
			S_IWUGO | S_IRUGO, con->dir,
			adev, &amdgpu_ras_debugfs_ctrl_ops);
	if (IS_ERR(ent)) {
		debugfs_remove(con->dir);
		return -EINVAL;
	}

	con->ent = ent;
	return 0;
}

int amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
		struct ras_fs_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
	struct dentry *ent;

	if (!obj || obj->ent)
		return -EINVAL;

	get_obj(obj);

	memcpy(obj->fs_data.debugfs_name,
			head->debugfs_name,
			sizeof(obj->fs_data.debugfs_name));

	ent = debugfs_create_file(obj->fs_data.debugfs_name,
			S_IWUGO | S_IRUGO, con->dir,
			obj, &amdgpu_ras_debugfs_ops);

	if (IS_ERR(ent))
		return -EINVAL;

	obj->ent = ent;

	return 0;
}

int amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->ent)
		return 0;

	debugfs_remove(obj->ent);
	obj->ent = NULL;
	put_obj(obj);

	return 0;
}

static int amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_debugfs_remove(adev, &obj->head);
	}

	debugfs_remove(con->ent);
	debugfs_remove(con->dir);
	con->dir = NULL;
	con->ent = NULL;

	return 0;
}
/* debugfs end */

/* ras fs */

static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
	amdgpu_ras_sysfs_create_feature_node(adev);
	amdgpu_ras_debugfs_create_ctrl_node(adev);

	return 0;
}

static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
	amdgpu_ras_debugfs_remove_all(adev);
	amdgpu_ras_sysfs_remove_all(adev);
	return 0;
}
/* ras fs end */

/* ih begin */
static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
{
	struct ras_ih_data *data = &obj->ih_data;
	struct amdgpu_iv_entry entry;
	int ret;

	while (data->rptr != data->wptr) {
		rmb();
		memcpy(&entry, &data->ring[data->rptr],
				data->element_size);

		wmb();
		data->rptr = (data->aligned_element_size +
				data->rptr) % data->ring_size;

		/* Let the IP handle its data; maybe we need to get the output
		 * from the callback to update the error type/count, etc.
		 */
		if (data->cb) {
			ret = data->cb(obj->adev, &entry);
			/* A ue will trigger an interrupt, and in that case we
			 * need a reset to recover the whole system. But leave
			 * the IP to do that recovery; here we just dispatch
			 * the error.
			 */
			if (ret == AMDGPU_RAS_UE)
				obj->err_data.ue_count++;
			/* Might need to get the ce count from a register, but
			 * not all IPs save a ce count; some IPs just use one
			 * or two bits to indicate that a ce happened.
			 */
		}
	}
}

static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
{
	struct ras_ih_data *data =
		container_of(work, struct ras_ih_data, ih_work);
	struct ras_manager *obj =
		container_of(data, struct ras_manager, ih_data);

	amdgpu_ras_interrupt_handler(obj);
}

int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
		struct ras_dispatch_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data;

	if (!obj)
		return -EINVAL;

	data = &obj->ih_data;

	if (data->inuse == 0)
		return 0;

	/* Might overflow... */
	memcpy(&data->ring[data->wptr], info->entry,
			data->element_size);

	wmb();
	data->wptr = (data->aligned_element_size +
			data->wptr) % data->ring_size;

	schedule_work(&data->ih_work);

	return 0;
}

int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
		struct ras_ih_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data;

	if (!obj)
		return -EINVAL;

	data = &obj->ih_data;
	if (data->inuse == 0)
		return 0;

	cancel_work_sync(&data->ih_work);

	kfree(data->ring);
	memset(data, 0, sizeof(*data));
	put_obj(obj);

	return 0;
}

int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
		struct ras_ih_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data;

	if (!obj) {
		/* in case we register the IH before enabling the ras feature */
		obj = amdgpu_ras_create_obj(adev, &info->head);
		if (!obj)
			return -EINVAL;
	} else {
		get_obj(obj);
	}

	data = &obj->ih_data;
	/* add the callback, etc. */
	*data = (struct ras_ih_data) {
		.inuse = 0,
		.cb = info->cb,
		.element_size = sizeof(struct amdgpu_iv_entry),
		.rptr = 0,
		.wptr = 0,
	};

	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);

	data->aligned_element_size = ALIGN(data->element_size, 8);
	/* the ring can store 64 iv entries. */
	data->ring_size = 64 * data->aligned_element_size;
	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
	if (!data->ring) {
		put_obj(obj);
		return -ENOMEM;
	}

	/* IH is ready */
	data->inuse = 1;

	return 0;
}
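
/*
 * Typical IP-side wiring for the handler above (a sketch; the callback
 * name is hypothetical and each IP passes its own head and handler):
 *
 *	struct ras_ih_if ih_info = {
 *		.head = *ras_if,
 *		.cb = ip_process_ras_data_cb,
 *	};
 *
 *	r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
 */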

static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		struct ras_ih_if info = {
			.head = obj->head,
		};
		amdgpu_ras_interrupt_remove_handler(adev, &info);
	}

	return 0;
}
/* ih end */

/* recovery begin */
static void amdgpu_ras_do_recovery(struct work_struct *work)
{
	struct amdgpu_ras *ras =
		container_of(work, struct amdgpu_ras, recovery_work);

	amdgpu_device_gpu_recover(ras->adev, 0);
	atomic_set(&ras->in_recovery, 0);
}

static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
		struct amdgpu_bo **bo_ptr)
{
	/* no need to free it actually. */
	amdgpu_bo_free_kernel(bo_ptr, NULL, NULL);
	return 0;
}

/* reserve vram with size@offset */
static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
		uint64_t offset, uint64_t size,
		struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_param bp;
	int r = 0;
	int i;
	struct amdgpu_bo *bo;

	if (bo_ptr)
		*bo_ptr = NULL;
	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r)
		return -EINVAL;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		goto error_reserve;

	offset = ALIGN(offset, PAGE_SIZE);
	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = offset >> PAGE_SHIFT;
		bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}

	ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
	r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx);
	if (r)
		goto error_pin;

	r = amdgpu_bo_pin_restricted(bo,
			AMDGPU_GEM_DOMAIN_VRAM,
			offset,
			offset + size);
	if (r)
		goto error_pin;

	if (bo_ptr)
		*bo_ptr = bo;

	amdgpu_bo_unreserve(bo);
	return r;

error_pin:
	amdgpu_bo_unreserve(bo);
error_reserve:
	amdgpu_bo_unref(&bo);
	return r;
}

/* alloc/realloc bps array */
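/*
 * The array grows in 1024-entry steps: e.g. with count + space_left == 1024
 * and pages == 1, new_space is 1025 and align_space rounds up to 2048.
 */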
static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
		struct ras_err_handler_data *data, int pages)
{
	unsigned int old_space = data->count + data->space_left;
	unsigned int new_space = old_space + pages;
	unsigned int align_space = ALIGN(new_space, 1024);
	void *tmp = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;

	if (data->bps) {
		memcpy(tmp, data->bps,
				data->count * sizeof(*data->bps));
		kfree(data->bps);
	}

	data->bps = tmp;
	data->space_left += align_space - old_space;
	return 0;
}

/* it deals with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
		unsigned long *bps, int pages)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	int i = pages;
	int ret = 0;

	if (!con || !bps || pages <= 0)
		return 0;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data)
		goto out;

	if (data->space_left <= pages) {
		if (amdgpu_ras_realloc_eh_data_space(adev, data, pages)) {
			ret = -ENOMEM;
			goto out;
		}
	}

	while (i--)
		data->bps[data->count++].bp = bps[i];

	data->space_left -= pages;
out:
	mutex_unlock(&con->recovery_lock);

	return ret;
}

/* called in gpu recovery/init */
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	uint64_t bp;
	struct amdgpu_bo *bo;
	int i;

	if (!con)
		return 0;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data)
		goto out;
	/* reserve vram at driver post stage. */
	for (i = data->last_reserved; i < data->count; i++) {
		bp = data->bps[i].bp;

		if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT,
					PAGE_SIZE, &bo))
			DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp);

		data->bps[i].bo = bo;
		data->last_reserved = i + 1;
	}
out:
	mutex_unlock(&con->recovery_lock);
	return 0;
}

/* called when the driver unloads */
static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	struct amdgpu_bo *bo;
	int i;

	if (!con)
		return 0;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data)
		goto out;
	for (i = data->last_reserved - 1; i >= 0; i--) {
		bo = data->bps[i].bo;

		amdgpu_ras_release_vram(adev, &bo);

		data->bps[i].bo = bo;
		data->last_reserved = i;
	}
out:
	mutex_unlock(&con->recovery_lock);
	return 0;
}

static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
{
	/* TODO
	 * write the array to eeprom when SMU is disabled.
	 */
	return 0;
}

static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
{
	/* TODO
	 * read the array from eeprom when SMU is disabled.
	 */
	return 0;
}

static int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data **data = &con->eh_data;

	*data = kmalloc(sizeof(**data),
			GFP_KERNEL|__GFP_ZERO);
	if (!*data)
		return -ENOMEM;

	mutex_init(&con->recovery_lock);
	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
	atomic_set(&con->in_recovery, 0);
	con->adev = adev;

	amdgpu_ras_load_bad_pages(adev);
	amdgpu_ras_reserve_bad_pages(adev);

	return 0;
}

static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data = con->eh_data;

	cancel_work_sync(&con->recovery_work);
	amdgpu_ras_save_bad_pages(adev);
	amdgpu_ras_release_bad_pages(adev);

	mutex_lock(&con->recovery_lock);
	con->eh_data = NULL;
	kfree(data->bps);
	kfree(data);
	mutex_unlock(&con->recovery_lock);

	return 0;
}
/* recovery end */

/*
 * check the hardware's ras ability, which will be saved in hw_supported.
 * if the hardware does not support ras, we can skip some ras initialization
 * and forbid ras operations from IPs.
 * if software itself, say a boot parameter, limits the ras ability, we still
 * need to allow IPs to do some limited operations, like disable. In such a
 * case, we have to initialize ras as normal, but need to check whether the
 * operation is allowed or not in each function.
 */
static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
		uint32_t *hw_supported, uint32_t *supported)
{
	*hw_supported = 0;
	*supported = 0;

	if (amdgpu_sriov_vf(adev) ||
			adev->asic_type != CHIP_VEGA20)
		return;

	if (adev->is_atom_fw &&
			(amdgpu_atomfirmware_mem_ecc_supported(adev) ||
			 amdgpu_atomfirmware_sram_ecc_supported(adev)))
		*hw_supported = AMDGPU_RAS_BLOCK_MASK;

	*supported = amdgpu_ras_enable == 0 ?
		0 : *hw_supported & amdgpu_ras_mask;
}

int amdgpu_ras_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (con)
		return 0;

	con = kmalloc(sizeof(struct amdgpu_ras) +
			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT,
			GFP_KERNEL|__GFP_ZERO);
	if (!con)
		return -ENOMEM;

	con->objs = (struct ras_manager *)(con + 1);

	amdgpu_ras_set_context(adev, con);

	amdgpu_ras_check_supported(adev, &con->hw_supported,
			&con->supported);
	con->features = 0;
	INIT_LIST_HEAD(&con->head);
	/* Might need to get this flag from the vbios. */
	con->flags = RAS_DEFAULT_FLAGS;

	if (amdgpu_ras_recovery_init(adev))
		goto recovery_out;

	amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
		amdgpu_ras_enable_all_features(adev, 1);

	if (amdgpu_ras_fs_init(adev))
		goto fs_out;

	amdgpu_ras_self_test(adev);

	DRM_INFO("RAS INFO: ras initialized successfully, "
			"hardware ability[%x] ras_mask[%x]\n",
			con->hw_supported, con->supported);
	return 0;
fs_out:
	amdgpu_ras_recovery_fini(adev);
recovery_out:
	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

	return -EINVAL;
}

/* do some init work after IP late init as dependence */
void amdgpu_ras_post_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	if (!con)
		return;

	/* We enable ras on all hw_supported blocks, but the boot parameter
	 * might disable some of them and one or more IPs might not have
	 * implemented ras yet. So we disable those on their behalf.
	 */
	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		list_for_each_entry_safe(obj, tmp, &con->head, node) {
			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
				amdgpu_ras_feature_enable(adev, &obj->head, 0);
				/* there should not be any references left. */
				WARN_ON(alive_obj(obj));
			}
		}
	}
}

/* do some fini work before IP fini as dependence */
int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return 0;

	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
	amdgpu_ras_disable_all_features(adev, 0);
	amdgpu_ras_recovery_fini(adev);
	return 0;
}

int amdgpu_ras_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return 0;

	amdgpu_ras_fs_fini(adev);
	amdgpu_ras_interrupt_remove_all(adev);

	WARN(con->features, "Feature mask is not cleared");

	if (con->features)
		amdgpu_ras_disable_all_features(adev, 1);

	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

	return 0;
}