Commit | Line | Data |
---|---|---|
c030f2e4 | 1 | /* |
2 | * Copyright 2018 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * | |
23 | */ | |
24 | #include <linux/debugfs.h> | |
25 | #include <linux/list.h> | |
26 | #include <linux/module.h> | |
f867723b | 27 | #include <linux/uaccess.h> |
7c6e68c7 AG |
28 | #include <linux/reboot.h> |
29 | #include <linux/syscalls.h> | |
f867723b | 30 | |
c030f2e4 | 31 | #include "amdgpu.h" |
32 | #include "amdgpu_ras.h" | |
b404ae82 | 33 | #include "amdgpu_atomfirmware.h" |
4e644fff | 34 | #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" |
c030f2e4 | 35 | |
c030f2e4 | 36 | const char *ras_error_string[] = { |
37 | "none", | |
38 | "parity", | |
39 | "single_correctable", | |
40 | "multi_uncorrectable", | |
41 | "poison", | |
42 | }; | |
43 | ||
44 | const char *ras_block_string[] = { | |
45 | "umc", | |
46 | "sdma", | |
47 | "gfx", | |
48 | "mmhub", | |
49 | "athub", | |
50 | "pcie_bif", | |
51 | "hdp", | |
52 | "xgmi_wafl", | |
53 | "df", | |
54 | "smn", | |
55 | "sem", | |
56 | "mp0", | |
57 | "mp1", | |
58 | "fuse", | |
59 | }; | |
60 | ||
61 | #define ras_err_str(i) (ras_error_string[ffs(i)]) | |
62 | #define ras_block_str(i) (ras_block_string[i]) | |
63 | ||
a564808e | 64 | #define AMDGPU_RAS_FLAG_INIT_BY_VBIOS 1 |
65 | #define AMDGPU_RAS_FLAG_INIT_NEED_RESET 2 | |
108c6a63 | 66 | #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) |
67 | ||
7cdc2ee3 TZ |
68 | /* inject address is 52 bits */ |
69 | #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) | |
70 | ||
7c6e68c7 AG |
71 | |
72 | atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0); | |
73 | ||
6e4be987 TZ |
74 | static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, |
75 | uint64_t addr); | |
76 | ||
c030f2e4 | 77 | static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, |
78 | size_t size, loff_t *pos) | |
79 | { | |
80 | struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private; | |
81 | struct ras_query_if info = { | |
82 | .head = obj->head, | |
83 | }; | |
84 | ssize_t s; | |
85 | char val[128]; | |
86 | ||
87 | if (amdgpu_ras_error_query(obj->adev, &info)) | |
88 | return -EINVAL; | |
89 | ||
90 | s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n", | |
91 | "ue", info.ue_count, | |
92 | "ce", info.ce_count); | |
93 | if (*pos >= s) | |
94 | return 0; | |
95 | ||
96 | s -= *pos; | |
97 | s = min_t(u64, s, size); | |
98 | ||
99 | ||
100 | if (copy_to_user(buf, &val[*pos], s)) | |
101 | return -EINVAL; | |
102 | ||
103 | *pos += s; | |
104 | ||
105 | return s; | |
106 | } | |
107 | ||
c030f2e4 | 108 | static const struct file_operations amdgpu_ras_debugfs_ops = { |
109 | .owner = THIS_MODULE, | |
110 | .read = amdgpu_ras_debugfs_read, | |
190211ab | 111 | .write = NULL, |
c030f2e4 | 112 | .llseek = default_llseek |
113 | }; | |
114 | ||
96ebb307 | 115 | static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id) |
116 | { | |
117 | int i; | |
118 | ||
119 | for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) { | |
120 | *block_id = i; | |
121 | if (strcmp(name, ras_block_str(i)) == 0) | |
122 | return 0; | |
123 | } | |
124 | return -EINVAL; | |
125 | } | |
126 | ||
127 | static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, | |
128 | const char __user *buf, size_t size, | |
129 | loff_t *pos, struct ras_debug_if *data) | |
130 | { | |
131 | ssize_t s = min_t(u64, 64, size); | |
132 | char str[65]; | |
133 | char block_name[33]; | |
134 | char err[9] = "ue"; | |
135 | int op = -1; | |
136 | int block_id; | |
44494f96 | 137 | uint32_t sub_block; |
96ebb307 | 138 | u64 address, value; |
139 | ||
140 | if (*pos) | |
141 | return -EINVAL; | |
142 | *pos = size; | |
143 | ||
144 | memset(str, 0, sizeof(str)); | |
145 | memset(data, 0, sizeof(*data)); | |
146 | ||
147 | if (copy_from_user(str, buf, s)) | |
148 | return -EINVAL; | |
149 | ||
150 | if (sscanf(str, "disable %32s", block_name) == 1) | |
151 | op = 0; | |
152 | else if (sscanf(str, "enable %32s %8s", block_name, err) == 2) | |
153 | op = 1; | |
154 | else if (sscanf(str, "inject %32s %8s", block_name, err) == 2) | |
155 | op = 2; | |
d5ea093e AG |
156 | else if (sscanf(str, "reboot %32s", block_name) == 1) |
157 | op = 3; | |
b076296b | 158 | else if (str[0] && str[1] && str[2] && str[3]) |
96ebb307 | 159 | /* ascii string, but no command matched. */ |
160 | return -EINVAL; | |
161 | ||
162 | if (op != -1) { | |
163 | if (amdgpu_ras_find_block_id_by_name(block_name, &block_id)) | |
164 | return -EINVAL; | |
165 | ||
166 | data->head.block = block_id; | |
e1063493 TZ |
167 | /* only ue and ce errors are supported */ |
168 | if (!memcmp("ue", err, 2)) | |
169 | data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; | |
170 | else if (!memcmp("ce", err, 2)) | |
171 | data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE; | |
172 | else | |
173 | return -EINVAL; | |
174 | ||
96ebb307 | 175 | data->op = op; |
176 | ||
177 | if (op == 2) { | |
44494f96 TZ |
178 | if (sscanf(str, "%*s %*s %*s %u %llu %llu", |
179 | &sub_block, &address, &value) != 3) | |
180 | if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", | |
181 | &sub_block, &address, &value) != 3) | |
96ebb307 | 182 | return -EINVAL; |
44494f96 | 183 | data->head.sub_block_index = sub_block; |
96ebb307 | 184 | data->inject.address = address; |
185 | data->inject.value = value; | |
186 | } | |
187 | } else { | |
73aa8e1a | 188 | if (size < sizeof(*data)) |
96ebb307 | 189 | return -EINVAL; |
190 | ||
191 | if (copy_from_user(data, buf, sizeof(*data))) | |
192 | return -EINVAL; | |
193 | } | |
194 | ||
195 | return 0; | |
196 | } | |
7c6e68c7 AG |
197 | |
198 | static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, | |
199 | struct ras_common_if *head); | |
200 | ||
74abc221 TSD |
201 | /** |
202 | * DOC: AMDGPU RAS debugfs control interface | |
36ea1bd2 | 203 | * |
204 | * It accepts struct ras_debug_if which has two members. | |
205 | * | |
206 | * First member: ras_debug_if::head or ras_debug_if::inject. | |
96ebb307 | 207 | * |
208 | * head is used to indicate which IP block will be under control. | |
36ea1bd2 | 209 | * |
210 | * head has four members: block, type, sub_block_index, and name. | |
211 | * block: which IP will be under control. | |
212 | * type: what kind of error will be enabled/disabled/injected. | |
213 | * sub_block_index: some IPs have subcomponents, say, GFX, SDMA. | |
214 | * name: the name of the IP. | |
215 | * | |
216 | * inject has two more members than head: address and value. | |
217 | * As their names indicate, the inject operation will write the | |
218 | * value to the address. | |
219 | * | |
220 | * Second member: struct ras_debug_if::op. | |
54e9ab2e | 221 | * It has four kinds of operations. |
879e723d AZ |
222 | * |
223 | * - 0: disable RAS on the block. Take ::head as its data. | |
224 | * - 1: enable RAS on the block. Take ::head as its data. | |
225 | * - 2: inject errors on the block. Take ::inject as its data. | |
54e9ab2e | 226 | * - 3: reboot on unrecoverable error |
36ea1bd2 | 227 | * |
96ebb307 | 228 | * How to use the interface? |
229 | * Programs: | |
230 | * copy the struct ras_debug_if into your code and initialize it, | |
231 | * then write the struct to the control node. | |
232 | * | |
879e723d AZ |
233 | * .. code-block:: bash |
234 | * | |
a20bfd0f | 235 | * echo op block [error [sub_block address value]] > .../ras/ras_ctrl |
879e723d AZ |
236 | * |
237 | * op: disable, enable, inject | |
238 | * disable: only block is needed | |
239 | * enable: block and error are needed | |
240 | * inject: error, address, value are needed | |
a20bfd0f | 241 | * block: umc, sdma, gfx, ......... |
879e723d AZ |
242 | * see ras_block_string[] for details |
243 | * error: ue, ce | |
244 | * ue: multi_uncorrectable | |
245 | * ce: single_correctable | |
246 | * sub_block: | |
247 | * sub block index, pass 0 if there is no sub block | |
248 | * | |
249 | * Here are some examples of bash commands: | |
250 | * | |
251 | * .. code-block:: bash | |
96ebb307 | 252 | * |
44494f96 TZ |
253 | * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl |
254 | * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl | |
96ebb307 | 255 | * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl |
256 | * | |
36ea1bd2 | 257 | * How to check the result? |
258 | * | |
259 | * For disable/enable, please check ras features at | |
260 | * /sys/class/drm/card[0/1/2...]/device/ras/features | |
261 | * | |
262 | * For inject, please check corresponding err count at | |
263 | * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count | |
264 | * | |
879e723d AZ |
265 | * .. note:: |
266 | * Operation is only allowed on blocks which are supported. | |
267 | * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask | |
36ea1bd2 | 268 | */ |
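/*
 * A minimal userspace sketch of driving the text command interface above
 * (illustrative only; it assumes debugfs is mounted at /sys/kernel/debug
 * and that card 0 is the RAS-capable device):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char cmd[] = "enable umc ce\n";
 *		int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *
 *		if (fd < 0 || write(fd, cmd, sizeof(cmd) - 1) < 0)
 *			perror("ras_ctrl");
 *		if (fd >= 0)
 *			close(fd);
 *		return 0;
 *	}
 */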
269 | static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf, | |
270 | size_t size, loff_t *pos) | |
271 | { | |
272 | struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; | |
273 | struct ras_debug_if data; | |
274 | int ret = 0; | |
275 | ||
96ebb307 | 276 | ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data); |
277 | if (ret) | |
36ea1bd2 | 278 | return -EINVAL; |
279 | ||
36ea1bd2 | 280 | if (!amdgpu_ras_is_supported(adev, data.head.block)) |
281 | return -EINVAL; | |
282 | ||
283 | switch (data.op) { | |
284 | case 0: | |
285 | ret = amdgpu_ras_feature_enable(adev, &data.head, 0); | |
286 | break; | |
287 | case 1: | |
288 | ret = amdgpu_ras_feature_enable(adev, &data.head, 1); | |
289 | break; | |
290 | case 2: | |
7cdc2ee3 TZ |
291 | if ((data.inject.address >= adev->gmc.mc_vram_size) || |
292 | (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) { | |
293 | ret = -EINVAL; | |
294 | break; | |
295 | } | |
296 | ||
6e4be987 TZ |
297 | /* umc ce/ue error injection for a bad page is not allowed */ |
298 | if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) && | |
299 | amdgpu_ras_check_bad_page(adev, data.inject.address)) { | |
300 | DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n", | |
301 | data.inject.address); | |
302 | break; | |
303 | } | |
304 | ||
7cdc2ee3 | 305 | /* data.inject.address is an offset instead of an absolute gpu address */ |
36ea1bd2 | 306 | ret = amdgpu_ras_error_inject(adev, &data.inject); |
307 | break; | |
d5ea093e AG |
308 | case 3: |
309 | amdgpu_ras_get_context(adev)->reboot = true; | |
310 | break; | |
96ebb307 | 311 | default: |
312 | ret = -EINVAL; | |
313 | break; | |
36ea1bd2 | 314 | } |
315 | ||
316 | if (ret) | |
317 | return -EINVAL; | |
318 | ||
319 | return size; | |
320 | } | |
321 | ||
084fe13b AG |
322 | /** |
323 | * DOC: AMDGPU RAS debugfs EEPROM table reset interface | |
324 | * | |
f77c7109 AD |
325 | * Some boards contain an EEPROM which is used to persistently store a list of |
326 | * bad pages containing ECC errors detected in vram. This interface provides | |
327 | * a way to reset the EEPROM, e.g., after testing error injection. | |
328 | * | |
329 | * Usage: | |
330 | * | |
331 | * .. code-block:: bash | |
332 | * | |
333 | * echo 1 > ../ras/ras_eeprom_reset | |
334 | * | |
335 | * will reset the EEPROM table to 0 entries. | |
336 | * | |
084fe13b AG |
337 | */ |
338 | static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf, | |
339 | size_t size, loff_t *pos) | |
340 | { | |
341 | struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; | |
342 | int ret; | |
343 | ||
344 | ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control); | |
345 | ||
346 | return ret == 1 ? size : -EIO; | |
347 | } | |
348 | ||
36ea1bd2 | 349 | static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { |
350 | .owner = THIS_MODULE, | |
351 | .read = NULL, | |
352 | .write = amdgpu_ras_debugfs_ctrl_write, | |
353 | .llseek = default_llseek | |
354 | }; | |
355 | ||
084fe13b AG |
356 | static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = { |
357 | .owner = THIS_MODULE, | |
358 | .read = NULL, | |
359 | .write = amdgpu_ras_debugfs_eeprom_write, | |
360 | .llseek = default_llseek | |
361 | }; | |
362 | ||
f77c7109 AD |
363 | /** |
364 | * DOC: AMDGPU RAS sysfs Error Count Interface | |
365 | * | |
366 | * It allows the user to read the error count for each IP block on the gpu through | |
367 | * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count | |
368 | * | |
369 | * It outputs multiple lines which report the uncorrected (ue) and corrected | |
370 | * (ce) error counts. | |
371 | * | |
372 | * The format of one line is below, | |
373 | * | |
374 | * [ce|ue]: count | |
375 | * | |
376 | * Example: | |
377 | * | |
378 | * .. code-block:: bash | |
379 | * | |
380 | * ue: 0 | |
381 | * ce: 1 | |
382 | * | |
383 | */ | |
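/*
 * Illustrative shell usage of the interface documented above (the card
 * index and the set of <block>_err_count files depend on the system):
 *
 *	cat /sys/class/drm/card0/device/ras/umc_err_count
 */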
c030f2e4 | 384 | static ssize_t amdgpu_ras_sysfs_read(struct device *dev, |
385 | struct device_attribute *attr, char *buf) | |
386 | { | |
387 | struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr); | |
388 | struct ras_query_if info = { | |
389 | .head = obj->head, | |
390 | }; | |
391 | ||
392 | if (amdgpu_ras_error_query(obj->adev, &info)) | |
393 | return -EINVAL; | |
394 | ||
395 | return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n", | |
396 | "ue", info.ue_count, | |
397 | "ce", info.ce_count); | |
398 | } | |
399 | ||
400 | /* obj begin */ | |
401 | ||
402 | #define get_obj(obj) do { (obj)->use++; } while (0) | |
403 | #define alive_obj(obj) ((obj)->use) | |
404 | ||
405 | static inline void put_obj(struct ras_manager *obj) | |
406 | { | |
407 | if (obj && --obj->use == 0) | |
408 | list_del(&obj->node); | |
409 | if (obj && obj->use < 0) { | |
410 | DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name); | |
411 | } | |
412 | } | |
413 | ||
414 | /* make one obj and return it. */ | |
415 | static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, | |
416 | struct ras_common_if *head) | |
417 | { | |
418 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
419 | struct ras_manager *obj; | |
420 | ||
421 | if (!con) | |
422 | return NULL; | |
423 | ||
424 | if (head->block >= AMDGPU_RAS_BLOCK_COUNT) | |
425 | return NULL; | |
426 | ||
427 | obj = &con->objs[head->block]; | |
428 | /* already exists. return obj? */ | |
429 | if (alive_obj(obj)) | |
430 | return NULL; | |
431 | ||
432 | obj->head = *head; | |
433 | obj->adev = adev; | |
434 | list_add(&obj->node, &con->head); | |
435 | get_obj(obj); | |
436 | ||
437 | return obj; | |
438 | } | |
439 | ||
440 | /* return an obj equal to head, or the first when head is NULL */ | |
441 | static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, | |
442 | struct ras_common_if *head) | |
443 | { | |
444 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
445 | struct ras_manager *obj; | |
446 | int i; | |
447 | ||
448 | if (!con) | |
449 | return NULL; | |
450 | ||
451 | if (head) { | |
452 | if (head->block >= AMDGPU_RAS_BLOCK_COUNT) | |
453 | return NULL; | |
454 | ||
455 | obj = &con->objs[head->block]; | |
456 | ||
457 | if (alive_obj(obj)) { | |
458 | WARN_ON(head->block != obj->head.block); | |
459 | return obj; | |
460 | } | |
461 | } else { | |
462 | for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { | |
463 | obj = &con->objs[i]; | |
464 | if (alive_obj(obj)) { | |
465 | WARN_ON(i != obj->head.block); | |
466 | return obj; | |
467 | } | |
468 | } | |
469 | } | |
470 | ||
471 | return NULL; | |
472 | } | |
473 | /* obj end */ | |
474 | ||
475 | /* feature ctl begin */ | |
476 | static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev, | |
477 | struct ras_common_if *head) | |
478 | { | |
5caf466a | 479 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
480 | ||
481 | return con->hw_supported & BIT(head->block); | |
c030f2e4 | 482 | } |
483 | ||
484 | static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev, | |
485 | struct ras_common_if *head) | |
486 | { | |
487 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
488 | ||
489 | return con->features & BIT(head->block); | |
490 | } | |
491 | ||
492 | /* | |
493 | * if obj is not created, then create one. | |
494 | * set feature enable flag. | |
495 | */ | |
496 | static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev, | |
497 | struct ras_common_if *head, int enable) | |
498 | { | |
499 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
500 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); | |
501 | ||
5caf466a | 502 | /* If hardware does not support ras, then do not create obj. |
503 | * But if hardware supports ras, we can create the obj. | |
504 | * Ras framework checks con->hw_supported to see if it needs to do | |
505 | * corresponding initialization. | |
506 | * IP checks con->supported to see if it needs to disable ras. | |
507 | */ | |
c030f2e4 | 508 | if (!amdgpu_ras_is_feature_allowed(adev, head)) |
509 | return 0; | |
510 | if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) | |
511 | return 0; | |
512 | ||
513 | if (enable) { | |
514 | if (!obj) { | |
515 | obj = amdgpu_ras_create_obj(adev, head); | |
516 | if (!obj) | |
517 | return -EINVAL; | |
518 | } else { | |
519 | /* In case we create obj somewhere else */ | |
520 | get_obj(obj); | |
521 | } | |
522 | con->features |= BIT(head->block); | |
523 | } else { | |
524 | if (obj && amdgpu_ras_is_feature_enabled(adev, head)) { | |
525 | con->features &= ~BIT(head->block); | |
526 | put_obj(obj); | |
527 | } | |
528 | } | |
529 | ||
530 | return 0; | |
531 | } | |
532 | ||
533 | /* wrapper of psp_ras_enable_features */ | |
534 | int amdgpu_ras_feature_enable(struct amdgpu_device *adev, | |
535 | struct ras_common_if *head, bool enable) | |
536 | { | |
537 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
538 | union ta_ras_cmd_input info; | |
539 | int ret; | |
540 | ||
541 | if (!con) | |
542 | return -EINVAL; | |
543 | ||
544 | if (!enable) { | |
545 | info.disable_features = (struct ta_ras_disable_features_input) { | |
828cfa29 | 546 | .block_id = amdgpu_ras_block_to_ta(head->block), |
547 | .error_type = amdgpu_ras_error_to_ta(head->type), | |
c030f2e4 | 548 | }; |
549 | } else { | |
550 | info.enable_features = (struct ta_ras_enable_features_input) { | |
828cfa29 | 551 | .block_id = amdgpu_ras_block_to_ta(head->block), |
552 | .error_type = amdgpu_ras_error_to_ta(head->type), | |
c030f2e4 | 553 | }; |
554 | } | |
555 | ||
556 | /* Do not enable if it is not allowed. */ | |
557 | WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head)); | |
558 | /* Are we already in the state we are going to set? */ | |
559 | if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) | |
560 | return 0; | |
561 | ||
562 | ret = psp_ras_enable_features(&adev->psp, &info, enable); | |
563 | if (ret) { | |
564 | DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n", | |
565 | enable ? "enable":"disable", | |
566 | ras_block_str(head->block), | |
567 | ret); | |
7af23ebe | 568 | if (ret == TA_RAS_STATUS__RESET_NEEDED) |
569 | return -EAGAIN; | |
c030f2e4 | 570 | return -EINVAL; |
571 | } | |
572 | ||
573 | /* setup the obj */ | |
574 | __amdgpu_ras_feature_enable(adev, head, enable); | |
575 | ||
576 | return 0; | |
577 | } | |
578 | ||
77de502b | 579 | /* Only used in device probe stage and called only once. */ |
580 | int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, | |
581 | struct ras_common_if *head, bool enable) | |
582 | { | |
583 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
584 | int ret; | |
585 | ||
586 | if (!con) | |
587 | return -EINVAL; | |
588 | ||
589 | if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { | |
7af23ebe | 590 | if (enable) { |
591 | /* There is no harm in issuing a ras TA cmd regardless of | |
592 | * the current ras state. | |
593 | * If current state == target state, it will do nothing, | |
594 | * but sometimes it requests the driver to reset and repost | |
595 | * with error code -EAGAIN. | |
596 | */ | |
597 | ret = amdgpu_ras_feature_enable(adev, head, 1); | |
598 | /* With an old ras TA, we might fail to enable ras. | |
599 | * Log it and just set up the object. | |
600 | * TODO: remove this WA in the future. | |
601 | */ | |
602 | if (ret == -EINVAL) { | |
603 | ret = __amdgpu_ras_feature_enable(adev, head, 1); | |
604 | if (!ret) | |
605 | DRM_INFO("RAS INFO: %s setup object\n", | |
606 | ras_block_str(head->block)); | |
607 | } | |
608 | } else { | |
609 | /* setup the object, then issue a ras TA disable cmd. */ | |
610 | ret = __amdgpu_ras_feature_enable(adev, head, 1); | |
611 | if (ret) | |
612 | return ret; | |
77de502b | 613 | |
77de502b | 614 | ret = amdgpu_ras_feature_enable(adev, head, 0); |
7af23ebe | 615 | } |
77de502b | 616 | } else |
617 | ret = amdgpu_ras_feature_enable(adev, head, enable); | |
618 | ||
619 | return ret; | |
620 | } | |
621 | ||
c030f2e4 | 622 | static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev, |
623 | bool bypass) | |
624 | { | |
625 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
626 | struct ras_manager *obj, *tmp; | |
627 | ||
628 | list_for_each_entry_safe(obj, tmp, &con->head, node) { | |
629 | /* bypass psp, | |
630 | * i.e., just release the obj and corresponding flags | |
631 | */ | |
632 | if (bypass) { | |
633 | if (__amdgpu_ras_feature_enable(adev, &obj->head, 0)) | |
634 | break; | |
635 | } else { | |
636 | if (amdgpu_ras_feature_enable(adev, &obj->head, 0)) | |
637 | break; | |
638 | } | |
289d513b | 639 | } |
c030f2e4 | 640 | |
641 | return con->features; | |
642 | } | |
643 | ||
644 | static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, | |
645 | bool bypass) | |
646 | { | |
647 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
648 | int ras_block_count = AMDGPU_RAS_BLOCK_COUNT; | |
649 | int i; | |
191051a1 | 650 | const enum amdgpu_ras_error_type default_ras_type = |
651 | AMDGPU_RAS_ERROR__NONE; | |
c030f2e4 | 652 | |
653 | for (i = 0; i < ras_block_count; i++) { | |
654 | struct ras_common_if head = { | |
655 | .block = i, | |
191051a1 | 656 | .type = default_ras_type, |
c030f2e4 | 657 | .sub_block_index = 0, |
658 | }; | |
659 | strcpy(head.name, ras_block_str(i)); | |
660 | if (bypass) { | |
661 | /* | |
662 | * bypass psp; vbios enables ras for us, | |
663 | * so just create the obj | |
664 | */ | |
665 | if (__amdgpu_ras_feature_enable(adev, &head, 1)) | |
666 | break; | |
667 | } else { | |
668 | if (amdgpu_ras_feature_enable(adev, &head, 1)) | |
669 | break; | |
670 | } | |
289d513b | 671 | } |
c030f2e4 | 672 | |
673 | return con->features; | |
674 | } | |
675 | /* feature ctl end */ | |
676 | ||
677 | /* query/inject/cure begin */ | |
678 | int amdgpu_ras_error_query(struct amdgpu_device *adev, | |
679 | struct ras_query_if *info) | |
680 | { | |
681 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); | |
6f102dba | 682 | struct ras_err_data err_data = {0, 0, 0, NULL}; |
c030f2e4 | 683 | |
684 | if (!obj) | |
685 | return -EINVAL; | |
c030f2e4 | 686 | |
939e2258 HZ |
687 | switch (info->head.block) { |
688 | case AMDGPU_RAS_BLOCK__UMC: | |
045c0216 TZ |
689 | if (adev->umc.funcs->query_ras_error_count) |
690 | adev->umc.funcs->query_ras_error_count(adev, &err_data); | |
13b7c46c TZ |
691 | /* umc query_ras_error_address is also responsible for clearing |
692 | * error status | |
693 | */ | |
694 | if (adev->umc.funcs->query_ras_error_address) | |
695 | adev->umc.funcs->query_ras_error_address(adev, &err_data); | |
939e2258 | 696 | break; |
83b0582c DL |
697 | case AMDGPU_RAS_BLOCK__GFX: |
698 | if (adev->gfx.funcs->query_ras_error_count) | |
699 | adev->gfx.funcs->query_ras_error_count(adev, &err_data); | |
700 | break; | |
9fb2d8de | 701 | case AMDGPU_RAS_BLOCK__MMHUB: |
d65bf1f8 TZ |
702 | if (adev->mmhub.funcs->query_ras_error_count) |
703 | adev->mmhub.funcs->query_ras_error_count(adev, &err_data); | |
9fb2d8de | 704 | break; |
d7bd680d GC |
705 | case AMDGPU_RAS_BLOCK__PCIE_BIF: |
706 | if (adev->nbio.funcs->query_ras_error_count) | |
707 | adev->nbio.funcs->query_ras_error_count(adev, &err_data); | |
708 | break; | |
939e2258 HZ |
709 | default: |
710 | break; | |
711 | } | |
05a58345 TZ |
712 | |
713 | obj->err_data.ue_count += err_data.ue_count; | |
714 | obj->err_data.ce_count += err_data.ce_count; | |
715 | ||
c030f2e4 | 716 | info->ue_count = obj->err_data.ue_count; |
717 | info->ce_count = obj->err_data.ce_count; | |
718 | ||
7c6e68c7 | 719 | if (err_data.ce_count) { |
05a58345 TZ |
720 | dev_info(adev->dev, "%ld correctable errors detected in %s block\n", |
721 | obj->err_data.ce_count, ras_block_str(info->head.block)); | |
7c6e68c7 AG |
722 | } |
723 | if (err_data.ue_count) { | |
05a58345 TZ |
724 | dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n", |
725 | obj->err_data.ue_count, ras_block_str(info->head.block)); | |
7c6e68c7 | 726 | } |
05a58345 | 727 | |
c030f2e4 | 728 | return 0; |
729 | } | |
730 | ||
731 | /* wrapper of psp_ras_trigger_error */ | |
732 | int amdgpu_ras_error_inject(struct amdgpu_device *adev, | |
733 | struct ras_inject_if *info) | |
734 | { | |
735 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); | |
736 | struct ta_ras_trigger_error_input block_info = { | |
828cfa29 | 737 | .block_id = amdgpu_ras_block_to_ta(info->head.block), |
738 | .inject_error_type = amdgpu_ras_error_to_ta(info->head.type), | |
c030f2e4 | 739 | .sub_block_index = info->head.sub_block_index, |
740 | .address = info->address, | |
741 | .value = info->value, | |
742 | }; | |
743 | int ret = 0; | |
744 | ||
745 | if (!obj) | |
746 | return -EINVAL; | |
747 | ||
83b0582c DL |
748 | switch (info->head.block) { |
749 | case AMDGPU_RAS_BLOCK__GFX: | |
750 | if (adev->gfx.funcs->ras_error_inject) | |
751 | ret = adev->gfx.funcs->ras_error_inject(adev, info); | |
752 | else | |
753 | ret = -EINVAL; | |
754 | break; | |
755 | case AMDGPU_RAS_BLOCK__UMC: | |
9fb2d8de | 756 | case AMDGPU_RAS_BLOCK__MMHUB: |
f3170352 | 757 | case AMDGPU_RAS_BLOCK__XGMI_WAFL: |
d7bd680d | 758 | case AMDGPU_RAS_BLOCK__PCIE_BIF: |
83b0582c DL |
759 | ret = psp_ras_trigger_error(&adev->psp, &block_info); |
760 | break; | |
761 | default: | |
a5dd40ca HZ |
762 | DRM_INFO("%s error injection is not supported yet\n", |
763 | ras_block_str(info->head.block)); | |
83b0582c | 764 | ret = -EINVAL; |
a5dd40ca HZ |
765 | } |
766 | ||
c030f2e4 | 767 | if (ret) |
768 | DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n", | |
769 | ras_block_str(info->head.block), | |
770 | ret); | |
771 | ||
772 | return ret; | |
773 | } | |
774 | ||
775 | int amdgpu_ras_error_cure(struct amdgpu_device *adev, | |
776 | struct ras_cure_if *info) | |
777 | { | |
778 | /* psp fw has no cure interface for now. */ | |
779 | return 0; | |
780 | } | |
781 | ||
782 | /* get the total error counts on all IPs */ | |
64cc5414 | 783 | unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev, |
c030f2e4 | 784 | bool is_ce) |
785 | { | |
786 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
787 | struct ras_manager *obj; | |
788 | struct ras_err_data data = {0, 0}; | |
789 | ||
790 | if (!con) | |
64cc5414 | 791 | return 0; |
c030f2e4 | 792 | |
793 | list_for_each_entry(obj, &con->head, node) { | |
794 | struct ras_query_if info = { | |
795 | .head = obj->head, | |
796 | }; | |
797 | ||
798 | if (amdgpu_ras_error_query(adev, &info)) | |
64cc5414 | 799 | return 0; |
c030f2e4 | 800 | |
801 | data.ce_count += info.ce_count; | |
802 | data.ue_count += info.ue_count; | |
803 | } | |
804 | ||
805 | return is_ce ? data.ce_count : data.ue_count; | |
806 | } | |
807 | /* query/inject/cure end */ | |
808 | ||
809 | ||
810 | /* sysfs begin */ | |
811 | ||
466b1793 | 812 | static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, |
813 | struct ras_badpage **bps, unsigned int *count); | |
814 | ||
815 | static char *amdgpu_ras_badpage_flags_str(unsigned int flags) | |
816 | { | |
817 | switch (flags) { | |
818 | case 0: | |
819 | return "R"; | |
820 | case 1: | |
821 | return "P"; | |
822 | case 2: | |
823 | default: | |
824 | return "F"; | |
825 | } | |
826 | } | |
827 | ||
f77c7109 AD |
828 | /** |
829 | * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface | |
466b1793 | 830 | * |
831 | * It allows the user to read the bad pages of vram on the gpu through | |
832 | * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages | |
833 | * | |
834 | * It outputs multiple lines, and each line stands for one gpu page. | |
835 | * | |
836 | * The format of one line is below, | |
837 | * gpu pfn : gpu page size : flags | |
838 | * | |
839 | * gpu pfn and gpu page size are printed in hex format. | |
840 | * flags can be one of the characters below: | |
f77c7109 | 841 | * |
466b1793 | 842 | * R: reserved, this gpu page is reserved and not able to be used. |
f77c7109 | 843 | * |
466b1793 | 844 | * P: pending for reserve, this gpu page is marked as bad and will be reserved |
f77c7109 AD |
845 | * in the next window of page_reserve. |
846 | * | |
466b1793 | 847 | * F: unable to reserve. this gpu page can't be reserved for some reason. |
848 | * | |
f77c7109 AD |
849 | * Examples: |
850 | * | |
851 | * .. code-block:: bash | |
852 | * | |
853 | * 0x00000001 : 0x00001000 : R | |
854 | * 0x00000002 : 0x00001000 : P | |
855 | * | |
466b1793 | 856 | */ |
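/*
 * Illustrative shell usage of the interface documented above (the card
 * index and whether any pages are listed depend on the system):
 *
 *	cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 */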
857 | ||
858 | static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f, | |
859 | struct kobject *kobj, struct bin_attribute *attr, | |
860 | char *buf, loff_t ppos, size_t count) | |
861 | { | |
862 | struct amdgpu_ras *con = | |
863 | container_of(attr, struct amdgpu_ras, badpages_attr); | |
864 | struct amdgpu_device *adev = con->adev; | |
865 | const unsigned int element_size = | |
866 | sizeof("0xabcdabcd : 0x12345678 : R\n") - 1; | |
d6ee400e SA |
867 | unsigned int start = div64_ul(ppos + element_size - 1, element_size); |
868 | unsigned int end = div64_ul(ppos + count - 1, element_size); | |
466b1793 | 869 | ssize_t s = 0; |
870 | struct ras_badpage *bps = NULL; | |
871 | unsigned int bps_count = 0; | |
872 | ||
873 | memset(buf, 0, count); | |
874 | ||
875 | if (amdgpu_ras_badpages_read(adev, &bps, &bps_count)) | |
876 | return 0; | |
877 | ||
878 | for (; start < end && start < bps_count; start++) | |
879 | s += scnprintf(&buf[s], element_size + 1, | |
880 | "0x%08x : 0x%08x : %1s\n", | |
881 | bps[start].bp, | |
882 | bps[start].size, | |
883 | amdgpu_ras_badpage_flags_str(bps[start].flags)); | |
884 | ||
885 | kfree(bps); | |
886 | ||
887 | return s; | |
888 | } | |
889 | ||
c030f2e4 | 890 | static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, |
891 | struct device_attribute *attr, char *buf) | |
892 | { | |
893 | struct amdgpu_ras *con = | |
894 | container_of(attr, struct amdgpu_ras, features_attr); | |
c030f2e4 | 895 | |
5212a3bd | 896 | return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features); |
c030f2e4 | 897 | } |
898 | ||
899 | static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev) | |
900 | { | |
901 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
902 | struct attribute *attrs[] = { | |
903 | &con->features_attr.attr, | |
904 | NULL | |
905 | }; | |
466b1793 | 906 | struct bin_attribute *bin_attrs[] = { |
907 | &con->badpages_attr, | |
908 | NULL | |
909 | }; | |
c030f2e4 | 910 | struct attribute_group group = { |
911 | .name = "ras", | |
912 | .attrs = attrs, | |
466b1793 | 913 | .bin_attrs = bin_attrs, |
c030f2e4 | 914 | }; |
915 | ||
916 | con->features_attr = (struct device_attribute) { | |
917 | .attr = { | |
918 | .name = "features", | |
919 | .mode = S_IRUGO, | |
920 | }, | |
921 | .show = amdgpu_ras_sysfs_features_read, | |
922 | }; | |
466b1793 | 923 | |
924 | con->badpages_attr = (struct bin_attribute) { | |
925 | .attr = { | |
926 | .name = "gpu_vram_bad_pages", | |
927 | .mode = S_IRUGO, | |
928 | }, | |
929 | .size = 0, | |
930 | .private = NULL, | |
931 | .read = amdgpu_ras_sysfs_badpages_read, | |
932 | }; | |
933 | ||
163def43 | 934 | sysfs_attr_init(attrs[0]); |
466b1793 | 935 | sysfs_bin_attr_init(bin_attrs[0]); |
c030f2e4 | 936 | |
937 | return sysfs_create_group(&adev->dev->kobj, &group); | |
938 | } | |
939 | ||
940 | static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev) | |
941 | { | |
942 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
943 | struct attribute *attrs[] = { | |
944 | &con->features_attr.attr, | |
945 | NULL | |
946 | }; | |
466b1793 | 947 | struct bin_attribute *bin_attrs[] = { |
948 | &con->badpages_attr, | |
949 | NULL | |
950 | }; | |
c030f2e4 | 951 | struct attribute_group group = { |
952 | .name = "ras", | |
953 | .attrs = attrs, | |
466b1793 | 954 | .bin_attrs = bin_attrs, |
c030f2e4 | 955 | }; |
956 | ||
957 | sysfs_remove_group(&adev->dev->kobj, &group); | |
958 | ||
959 | return 0; | |
960 | } | |
961 | ||
962 | int amdgpu_ras_sysfs_create(struct amdgpu_device *adev, | |
963 | struct ras_fs_if *head) | |
964 | { | |
965 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head); | |
966 | ||
967 | if (!obj || obj->attr_inuse) | |
968 | return -EINVAL; | |
969 | ||
970 | get_obj(obj); | |
971 | ||
972 | memcpy(obj->fs_data.sysfs_name, | |
973 | head->sysfs_name, | |
974 | sizeof(obj->fs_data.sysfs_name)); | |
975 | ||
976 | obj->sysfs_attr = (struct device_attribute){ | |
977 | .attr = { | |
978 | .name = obj->fs_data.sysfs_name, | |
979 | .mode = S_IRUGO, | |
980 | }, | |
981 | .show = amdgpu_ras_sysfs_read, | |
982 | }; | |
163def43 | 983 | sysfs_attr_init(&obj->sysfs_attr.attr); |
c030f2e4 | 984 | |
985 | if (sysfs_add_file_to_group(&adev->dev->kobj, | |
986 | &obj->sysfs_attr.attr, | |
987 | "ras")) { | |
988 | put_obj(obj); | |
989 | return -EINVAL; | |
990 | } | |
991 | ||
992 | obj->attr_inuse = 1; | |
993 | ||
994 | return 0; | |
995 | } | |
996 | ||
997 | int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, | |
998 | struct ras_common_if *head) | |
999 | { | |
1000 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); | |
1001 | ||
1002 | if (!obj || !obj->attr_inuse) | |
1003 | return -EINVAL; | |
1004 | ||
1005 | sysfs_remove_file_from_group(&adev->dev->kobj, | |
1006 | &obj->sysfs_attr.attr, | |
1007 | "ras"); | |
1008 | obj->attr_inuse = 0; | |
1009 | put_obj(obj); | |
1010 | ||
1011 | return 0; | |
1012 | } | |
1013 | ||
1014 | static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) | |
1015 | { | |
1016 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1017 | struct ras_manager *obj, *tmp; | |
1018 | ||
1019 | list_for_each_entry_safe(obj, tmp, &con->head, node) { | |
1020 | amdgpu_ras_sysfs_remove(adev, &obj->head); | |
1021 | } | |
1022 | ||
1023 | amdgpu_ras_sysfs_remove_feature_node(adev); | |
1024 | ||
1025 | return 0; | |
1026 | } | |
1027 | /* sysfs end */ | |
1028 | ||
1029 | /* debugfs begin */ | |
450f30ea | 1030 | static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) |
36ea1bd2 | 1031 | { |
1032 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1033 | struct drm_minor *minor = adev->ddev->primary; | |
36ea1bd2 | 1034 | |
450f30ea | 1035 | con->dir = debugfs_create_dir("ras", minor->debugfs_root); |
012dd14d GC |
1036 | debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir, |
1037 | adev, &amdgpu_ras_debugfs_ctrl_ops); | |
1038 | debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir, | |
1039 | adev, &amdgpu_ras_debugfs_eeprom_ops); | |
36ea1bd2 | 1040 | } |
1041 | ||
450f30ea | 1042 | void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, |
c030f2e4 | 1043 | struct ras_fs_if *head) |
1044 | { | |
1045 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1046 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head); | |
c030f2e4 | 1047 | |
1048 | if (!obj || obj->ent) | |
450f30ea | 1049 | return; |
c030f2e4 | 1050 | |
1051 | get_obj(obj); | |
1052 | ||
1053 | memcpy(obj->fs_data.debugfs_name, | |
1054 | head->debugfs_name, | |
1055 | sizeof(obj->fs_data.debugfs_name)); | |
1056 | ||
450f30ea GKH |
1057 | obj->ent = debugfs_create_file(obj->fs_data.debugfs_name, |
1058 | S_IWUGO | S_IRUGO, con->dir, obj, | |
1059 | &amdgpu_ras_debugfs_ops); | |
c030f2e4 | 1060 | } |
1061 | ||
450f30ea | 1062 | void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, |
c030f2e4 | 1063 | struct ras_common_if *head) |
1064 | { | |
1065 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); | |
1066 | ||
1067 | if (!obj || !obj->ent) | |
450f30ea | 1068 | return; |
c030f2e4 | 1069 | |
1070 | debugfs_remove(obj->ent); | |
1071 | obj->ent = NULL; | |
1072 | put_obj(obj); | |
c030f2e4 | 1073 | } |
1074 | ||
450f30ea | 1075 | static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) |
c030f2e4 | 1076 | { |
1077 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1078 | struct ras_manager *obj, *tmp; | |
1079 | ||
1080 | list_for_each_entry_safe(obj, tmp, &con->head, node) { | |
1081 | amdgpu_ras_debugfs_remove(adev, &obj->head); | |
1082 | } | |
1083 | ||
012dd14d | 1084 | debugfs_remove_recursive(con->dir); |
c030f2e4 | 1085 | con->dir = NULL; |
c030f2e4 | 1086 | } |
1087 | /* debugfs end */ | |
1088 | ||
1089 | /* ras fs */ | |
1090 | ||
1091 | static int amdgpu_ras_fs_init(struct amdgpu_device *adev) | |
1092 | { | |
c030f2e4 | 1093 | amdgpu_ras_sysfs_create_feature_node(adev); |
36ea1bd2 | 1094 | amdgpu_ras_debugfs_create_ctrl_node(adev); |
c030f2e4 | 1095 | |
1096 | return 0; | |
1097 | } | |
1098 | ||
1099 | static int amdgpu_ras_fs_fini(struct amdgpu_device *adev) | |
1100 | { | |
1101 | amdgpu_ras_debugfs_remove_all(adev); | |
1102 | amdgpu_ras_sysfs_remove_all(adev); | |
1103 | return 0; | |
1104 | } | |
1105 | /* ras fs end */ | |
1106 | ||
1107 | /* ih begin */ | |
1108 | static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) | |
1109 | { | |
1110 | struct ras_ih_data *data = &obj->ih_data; | |
1111 | struct amdgpu_iv_entry entry; | |
1112 | int ret; | |
cf04dfd0 | 1113 | struct ras_err_data err_data = {0, 0, 0, NULL}; |
c030f2e4 | 1114 | |
1115 | while (data->rptr != data->wptr) { | |
1116 | rmb(); | |
1117 | memcpy(&entry, &data->ring[data->rptr], | |
1118 | data->element_size); | |
1119 | ||
1120 | wmb(); | |
1121 | data->rptr = (data->aligned_element_size + | |
1122 | data->rptr) % data->ring_size; | |
1123 | ||
1124 | /* Let IP handle its data; maybe we need to get the output | |
1125 | * from the callback to update the error type/count, etc. | |
1126 | */ | |
1127 | if (data->cb) { | |
cf04dfd0 | 1128 | ret = data->cb(obj->adev, &err_data, &entry); |
c030f2e4 | 1129 | /* ue will trigger an interrupt, and in that case |
1130 | * we need to do a reset to recover the whole system. | |
1131 | * But leave that recovery to the IP; here we just dispatch | |
1132 | * the error. | |
1133 | */ | |
bd2280da | 1134 | if (ret == AMDGPU_RAS_SUCCESS) { |
51437623 TZ |
1135 | /* these counts could be left as 0 if |
1136 | * some blocks do not count error number | |
1137 | */ | |
cf04dfd0 | 1138 | obj->err_data.ue_count += err_data.ue_count; |
51437623 | 1139 | obj->err_data.ce_count += err_data.ce_count; |
c030f2e4 | 1140 | } |
c030f2e4 | 1141 | } |
1142 | } | |
1143 | } | |
1144 | ||
1145 | static void amdgpu_ras_interrupt_process_handler(struct work_struct *work) | |
1146 | { | |
1147 | struct ras_ih_data *data = | |
1148 | container_of(work, struct ras_ih_data, ih_work); | |
1149 | struct ras_manager *obj = | |
1150 | container_of(data, struct ras_manager, ih_data); | |
1151 | ||
1152 | amdgpu_ras_interrupt_handler(obj); | |
1153 | } | |
1154 | ||
1155 | int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, | |
1156 | struct ras_dispatch_if *info) | |
1157 | { | |
1158 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); | |
1159 | struct ras_ih_data *data = &obj->ih_data; | |
1160 | ||
1161 | if (!obj) | |
1162 | return -EINVAL; | |
1163 | ||
1164 | if (data->inuse == 0) | |
1165 | return 0; | |
1166 | ||
1167 | /* Might be overflow... */ | |
1168 | memcpy(&data->ring[data->wptr], info->entry, | |
1169 | data->element_size); | |
1170 | ||
1171 | wmb(); | |
1172 | data->wptr = (data->aligned_element_size + | |
1173 | data->wptr) % data->ring_size; | |
1174 | ||
1175 | schedule_work(&data->ih_work); | |
1176 | ||
1177 | return 0; | |
1178 | } | |
1179 | ||
1180 | int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev, | |
1181 | struct ras_ih_if *info) | |
1182 | { | |
1183 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); | |
1184 | struct ras_ih_data *data; | |
1185 | ||
1186 | if (!obj) | |
1187 | return -EINVAL; | |
1188 | ||
1189 | data = &obj->ih_data; | |
1190 | if (data->inuse == 0) | |
1191 | return 0; | |
1192 | ||
1193 | cancel_work_sync(&data->ih_work); | |
1194 | ||
1195 | kfree(data->ring); | |
1196 | memset(data, 0, sizeof(*data)); | |
1197 | put_obj(obj); | |
1198 | ||
1199 | return 0; | |
1200 | } | |
1201 | ||
1202 | int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev, | |
1203 | struct ras_ih_if *info) | |
1204 | { | |
1205 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); | |
1206 | struct ras_ih_data *data; | |
1207 | ||
1208 | if (!obj) { | |
1209 | /* in case we register the IH before enabling the ras feature */ | |
1210 | obj = amdgpu_ras_create_obj(adev, &info->head); | |
1211 | if (!obj) | |
1212 | return -EINVAL; | |
1213 | } else | |
1214 | get_obj(obj); | |
1215 | ||
1216 | data = &obj->ih_data; | |
1217 | /* add the callback, etc. */ | |
1218 | *data = (struct ras_ih_data) { | |
1219 | .inuse = 0, | |
1220 | .cb = info->cb, | |
1221 | .element_size = sizeof(struct amdgpu_iv_entry), | |
1222 | .rptr = 0, | |
1223 | .wptr = 0, | |
1224 | }; | |
1225 | ||
1226 | INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler); | |
1227 | ||
1228 | data->aligned_element_size = ALIGN(data->element_size, 8); | |
1229 | /* the ring can store 64 iv entries. */ | |
1230 | data->ring_size = 64 * data->aligned_element_size; | |
1231 | data->ring = kmalloc(data->ring_size, GFP_KERNEL); | |
1232 | if (!data->ring) { | |
1233 | put_obj(obj); | |
1234 | return -ENOMEM; | |
1235 | } | |
1236 | ||
1237 | /* IH is ready */ | |
1238 | data->inuse = 1; | |
1239 | ||
1240 | return 0; | |
1241 | } | |
1242 | ||
1243 | static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev) | |
1244 | { | |
1245 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1246 | struct ras_manager *obj, *tmp; | |
1247 | ||
1248 | list_for_each_entry_safe(obj, tmp, &con->head, node) { | |
1249 | struct ras_ih_if info = { | |
1250 | .head = obj->head, | |
1251 | }; | |
1252 | amdgpu_ras_interrupt_remove_handler(adev, &info); | |
1253 | } | |
1254 | ||
1255 | return 0; | |
1256 | } | |
1257 | /* ih end */ | |
1258 | ||
1259 | /* recovery begin */ | |
466b1793 | 1260 | |
1261 | /* return 0 on success. | |
1262 | * caller needs to free bps. | |
1263 | */ | |
1264 | static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, | |
1265 | struct ras_badpage **bps, unsigned int *count) | |
1266 | { | |
1267 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1268 | struct ras_err_handler_data *data; | |
1269 | int i = 0; | |
1270 | int ret = 0; | |
1271 | ||
1272 | if (!con || !con->eh_data || !bps || !count) | |
1273 | return -EINVAL; | |
1274 | ||
1275 | mutex_lock(&con->recovery_lock); | |
1276 | data = con->eh_data; | |
1277 | if (!data || data->count == 0) { | |
1278 | *bps = NULL; | |
1279 | goto out; | |
1280 | } | |
1281 | ||
1282 | *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL); | |
1283 | if (!*bps) { | |
1284 | ret = -ENOMEM; | |
1285 | goto out; | |
1286 | } | |
1287 | ||
1288 | for (; i < data->count; i++) { | |
1289 | (*bps)[i] = (struct ras_badpage){ | |
9dc23a63 | 1290 | .bp = data->bps[i].retired_page, |
466b1793 | 1291 | .size = AMDGPU_GPU_PAGE_SIZE, |
1292 | .flags = 0, | |
1293 | }; | |
1294 | ||
1295 | if (data->last_reserved <= i) | |
1296 | (*bps)[i].flags = 1; | |
9dc23a63 | 1297 | else if (data->bps_bo[i] == NULL) |
466b1793 | 1298 | (*bps)[i].flags = 2; |
1299 | } | |
1300 | ||
1301 | *count = data->count; | |
1302 | out: | |
1303 | mutex_unlock(&con->recovery_lock); | |
1304 | return ret; | |
1305 | } | |
1306 | ||
c030f2e4 | 1307 | static void amdgpu_ras_do_recovery(struct work_struct *work) |
1308 | { | |
1309 | struct amdgpu_ras *ras = | |
1310 | container_of(work, struct amdgpu_ras, recovery_work); | |
1311 | ||
1312 | amdgpu_device_gpu_recover(ras->adev, 0); | |
1313 | atomic_set(&ras->in_recovery, 0); | |
1314 | } | |
1315 | ||
c030f2e4 | 1316 | /* alloc/realloc bps array */ |
1317 | static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, | |
1318 | struct ras_err_handler_data *data, int pages) | |
1319 | { | |
1320 | unsigned int old_space = data->count + data->space_left; | |
1321 | unsigned int new_space = old_space + pages; | |
9dc23a63 TZ |
1322 | unsigned int align_space = ALIGN(new_space, 512); |
1323 | void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL); | |
1324 | struct amdgpu_bo **bps_bo = | |
1325 | kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL); | |
1326 | ||
1327 | if (!bps || !bps_bo) { | |
1328 | kfree(bps); | |
1329 | kfree(bps_bo); | |
c030f2e4 | 1330 | return -ENOMEM; |
9dc23a63 | 1331 | } |
c030f2e4 | 1332 | |
1333 | if (data->bps) { | |
9dc23a63 | 1334 | memcpy(bps, data->bps, |
c030f2e4 | 1335 | data->count * sizeof(*data->bps)); |
1336 | kfree(data->bps); | |
1337 | } | |
9dc23a63 TZ |
1338 | if (data->bps_bo) { |
1339 | memcpy(bps_bo, data->bps_bo, | |
1340 | data->count * sizeof(*data->bps_bo)); | |
1341 | kfree(data->bps_bo); | |
1342 | } | |
c030f2e4 | 1343 | |
9dc23a63 TZ |
1344 | data->bps = bps; |
1345 | data->bps_bo = bps_bo; | |
c030f2e4 | 1346 | data->space_left += align_space - old_space; |
1347 | return 0; | |
1348 | } | |
1349 | ||
1350 | /* it deals with vram only. */ | |
1351 | int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, | |
9dc23a63 | 1352 | struct eeprom_table_record *bps, int pages) |
c030f2e4 | 1353 | { |
1354 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
73aa8e1a | 1355 | struct ras_err_handler_data *data; |
c030f2e4 | 1356 | int ret = 0; |
1357 | ||
73aa8e1a | 1358 | if (!con || !con->eh_data || !bps || pages <= 0) |
c030f2e4 | 1359 | return 0; |
1360 | ||
1361 | mutex_lock(&con->recovery_lock); | |
73aa8e1a | 1362 | data = con->eh_data; |
c030f2e4 | 1363 | if (!data) |
1364 | goto out; | |
1365 | ||
1366 | if (data->space_left <= pages) | |
1367 | if (amdgpu_ras_realloc_eh_data_space(adev, data, pages)) { | |
1368 | ret = -ENOMEM; | |
1369 | goto out; | |
1370 | } | |
1371 | ||
9dc23a63 TZ |
1372 | memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps)); |
1373 | data->count += pages; | |
c030f2e4 | 1374 | data->space_left -= pages; |
9dc23a63 | 1375 | |
c030f2e4 | 1376 | out: |
1377 | mutex_unlock(&con->recovery_lock); | |
1378 | ||
1379 | return ret; | |
1380 | } | |
1381 | ||
78ad00c9 TZ |
1382 | /* |
1383 | * write the error record array to eeprom; the function should be | |
1384 | * protected by recovery_lock | |
1385 | */ | |
1386 | static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) | |
1387 | { | |
1388 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1389 | struct ras_err_handler_data *data; | |
8a3e801f | 1390 | struct amdgpu_ras_eeprom_control *control; |
78ad00c9 TZ |
1391 | int save_count; |
1392 | ||
1393 | if (!con || !con->eh_data) | |
1394 | return 0; | |
1395 | ||
8a3e801f | 1396 | control = &con->eeprom_control; |
78ad00c9 TZ |
1397 | data = con->eh_data; |
1398 | save_count = data->count - control->num_recs; | |
1399 | /* only new entries are saved */ | |
1400 | if (save_count > 0) | |
0771b0bf | 1401 | if (amdgpu_ras_eeprom_process_recods(control, |
78ad00c9 TZ |
1402 | &data->bps[control->num_recs], |
1403 | true, | |
1404 | save_count)) { | |
1405 | DRM_ERROR("Failed to save EEPROM table data!"); | |
1406 | return -EIO; | |
1407 | } | |
1408 | ||
1409 | return 0; | |
1410 | } | |
1411 | ||
1412 | /* | |
1413 | * read the error record array in eeprom and reserve enough space for | |
1414 | * storing new bad pages | |
1415 | */ | |
1416 | static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) | |
1417 | { | |
1418 | struct amdgpu_ras_eeprom_control *control = | |
1419 | &adev->psp.ras.ras->eeprom_control; | |
1420 | struct eeprom_table_record *bps = NULL; | |
1421 | int ret = 0; | |
1422 | ||
1423 | /* no bad page record, skip eeprom access */ | |
1424 | if (!control->num_recs) | |
1425 | return ret; | |
1426 | ||
1427 | bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL); | |
1428 | if (!bps) | |
1429 | return -ENOMEM; | |
1430 | ||
1431 | if (amdgpu_ras_eeprom_process_recods(control, bps, false, | |
1432 | control->num_recs)) { | |
1433 | DRM_ERROR("Failed to load EEPROM table records!"); | |
1434 | ret = -EIO; | |
1435 | goto out; | |
1436 | } | |
1437 | ||
1438 | ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs); | |
1439 | ||
1440 | out: | |
1441 | kfree(bps); | |
1442 | return ret; | |
1443 | } | |
1444 | ||
6e4be987 TZ |
1445 | /* |
1446 | * check if an address belongs to a bad page | |
1447 | * | |
1448 | * Note: this check is only for umc block | |
1449 | */ | |
1450 | static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, | |
1451 | uint64_t addr) | |
1452 | { | |
1453 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1454 | struct ras_err_handler_data *data; | |
1455 | int i; | |
1456 | bool ret = false; | |
1457 | ||
1458 | if (!con || !con->eh_data) | |
1459 | return ret; | |
1460 | ||
1461 | mutex_lock(&con->recovery_lock); | |
1462 | data = con->eh_data; | |
1463 | if (!data) | |
1464 | goto out; | |
1465 | ||
1466 | addr >>= AMDGPU_GPU_PAGE_SHIFT; | |
1467 | for (i = 0; i < data->count; i++) | |
1468 | if (addr == data->bps[i].retired_page) { | |
1469 | ret = true; | |
1470 | goto out; | |
1471 | } | |
1472 | ||
1473 | out: | |
1474 | mutex_unlock(&con->recovery_lock); | |
1475 | return ret; | |
1476 | } | |
1477 | ||
c030f2e4 | 1478 | /* called in gpu recovery/init */ |
1479 | int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev) | |
1480 | { | |
1481 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
73aa8e1a | 1482 | struct ras_err_handler_data *data; |
c030f2e4 | 1483 | uint64_t bp; |
de7b45ba | 1484 | struct amdgpu_bo *bo = NULL; |
78ad00c9 | 1485 | int i, ret = 0; |
c030f2e4 | 1486 | |
73aa8e1a | 1487 | if (!con || !con->eh_data) |
c030f2e4 | 1488 | return 0; |
1489 | ||
1490 | mutex_lock(&con->recovery_lock); | |
73aa8e1a | 1491 | data = con->eh_data; |
1492 | if (!data) | |
1493 | goto out; | |
c030f2e4 | 1494 | /* reserve vram at driver post stage. */ |
1495 | for (i = data->last_reserved; i < data->count; i++) { | |
9dc23a63 | 1496 | bp = data->bps[i].retired_page; |
c030f2e4 | 1497 | |
ae115c81 TZ |
1498 | /* There are two cases of reserve errors that should be ignored: |
1499 | * 1) a ras bad page has been allocated (used by someone); | |
1500 | * 2) a ras bad page has been reserved (duplicate error injection | |
1501 | * for one page); | |
1502 | */ | |
a142ba88 AD |
1503 | if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT, |
1504 | AMDGPU_GPU_PAGE_SIZE, | |
de7b45ba CK |
1505 | AMDGPU_GEM_DOMAIN_VRAM, |
1506 | &bo, NULL)) | |
ae115c81 | 1507 | DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp); |
c030f2e4 | 1508 | |
9dc23a63 | 1509 | data->bps_bo[i] = bo; |
c030f2e4 | 1510 | data->last_reserved = i + 1; |
de7b45ba | 1511 | bo = NULL; |
c030f2e4 | 1512 | } |
78ad00c9 TZ |
1513 | |
1514 | /* continue to save bad pages to eeprom even if reserve_vram fails */ | |
1515 | ret = amdgpu_ras_save_bad_pages(adev); | |
73aa8e1a | 1516 | out: |
c030f2e4 | 1517 | mutex_unlock(&con->recovery_lock); |
78ad00c9 | 1518 | return ret; |
c030f2e4 | 1519 | } |
1520 | ||
1521 | /* called when the driver unloads */ | |
1522 | static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev) | |
1523 | { | |
1524 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
73aa8e1a | 1525 | struct ras_err_handler_data *data; |
c030f2e4 | 1526 | struct amdgpu_bo *bo; |
1527 | int i; | |
1528 | ||
73aa8e1a | 1529 | if (!con || !con->eh_data) |
c030f2e4 | 1530 | return 0; |
1531 | ||
1532 | mutex_lock(&con->recovery_lock); | |
73aa8e1a | 1533 | data = con->eh_data; |
1534 | if (!data) | |
1535 | goto out; | |
1536 | ||
c030f2e4 | 1537 | for (i = data->last_reserved - 1; i >= 0; i--) { |
9dc23a63 | 1538 | bo = data->bps_bo[i]; |
c030f2e4 | 1539 | |
de7b45ba | 1540 | amdgpu_bo_free_kernel(&bo, NULL, NULL); |
c030f2e4 | 1541 | |
9dc23a63 | 1542 | data->bps_bo[i] = bo; |
c030f2e4 | 1543 | data->last_reserved = i; |
1544 | } | |
73aa8e1a | 1545 | out: |
c030f2e4 | 1546 | mutex_unlock(&con->recovery_lock); |
1547 | return 0; | |
1548 | } | |
1549 | ||
1a6fc071 | 1550 | int amdgpu_ras_recovery_init(struct amdgpu_device *adev) |
c030f2e4 | 1551 | { |
1552 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
4d1337d2 | 1553 | struct ras_err_handler_data **data; |
78ad00c9 | 1554 | int ret; |
c030f2e4 | 1555 | |
4d1337d2 AG |
1556 | if (con) |
1557 | data = &con->eh_data; | |
1558 | else | |
1559 | return 0; | |
1560 | ||
1a6fc071 TZ |
1561 | *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO); |
1562 | if (!*data) { | |
1563 | ret = -ENOMEM; | |
1564 | goto out; | |
1565 | } | |
c030f2e4 | 1566 | |
1567 | mutex_init(&con->recovery_lock); | |
1568 | INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); | |
1569 | atomic_set(&con->in_recovery, 0); | |
1570 | con->adev = adev; | |
1571 | ||
0771b0bf | 1572 | ret = amdgpu_ras_eeprom_init(&con->eeprom_control); |
78ad00c9 | 1573 | if (ret) |
1a6fc071 | 1574 | goto free; |
78ad00c9 | 1575 | |
0771b0bf | 1576 | if (con->eeprom_control.num_recs) { |
78ad00c9 TZ |
1577 | ret = amdgpu_ras_load_bad_pages(adev); |
1578 | if (ret) | |
1a6fc071 | 1579 | goto free; |
78ad00c9 TZ |
1580 | ret = amdgpu_ras_reserve_bad_pages(adev); |
1581 | if (ret) | |
1a6fc071 | 1582 | goto release; |
78ad00c9 | 1583 | } |
c030f2e4 | 1584 | |
1585 | return 0; | |
1a6fc071 TZ |
1586 | |
1587 | release: | |
1588 | amdgpu_ras_release_bad_pages(adev); | |
1589 | free: | |
1a6fc071 TZ |
1590 | kfree((*data)->bps); |
1591 | kfree((*data)->bps_bo); | |
1592 | kfree(*data); | |
1995b3a3 | 1593 | con->eh_data = NULL; |
1a6fc071 TZ |
1594 | out: |
1595 | DRM_WARN("Failed to initialize ras recovery!\n"); | |
1596 | ||
1597 | return ret; | |
c030f2e4 | 1598 | } |
1599 | ||
1600 | static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) | |
1601 | { | |
1602 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1603 | struct ras_err_handler_data *data = con->eh_data; | |
1604 | ||
1a6fc071 TZ |
1605 | /* recovery_init failed to init it, fini is useless */ |
1606 | if (!data) | |
1607 | return 0; | |
1608 | ||
c030f2e4 | 1609 | cancel_work_sync(&con->recovery_work); |
c030f2e4 | 1610 | amdgpu_ras_release_bad_pages(adev); |
1611 | ||
1612 | mutex_lock(&con->recovery_lock); | |
1613 | con->eh_data = NULL; | |
1614 | kfree(data->bps); | |
1a6fc071 | 1615 | kfree(data->bps_bo); |
c030f2e4 | 1616 | kfree(data); |
1617 | mutex_unlock(&con->recovery_lock); | |
1618 | ||
1619 | return 0; | |
1620 | } | |
1621 | /* recovery end */ | |
1622 | ||
a564808e | 1623 | /* return 0 if ras will reset the gpu and repost. */ |
1624 | int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, | |
1625 | unsigned int block) | |
1626 | { | |
1627 | struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); | |
1628 | ||
1629 | if (!ras) | |
1630 | return -EINVAL; | |
1631 | ||
1632 | ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET; | |
1633 | return 0; | |
1634 | } | |
1635 | ||
5caf466a | 1636 | /*
1637 | * Check the hardware's RAS capability and save it in hw_supported.
1638 | * If the hardware does not support RAS, we can skip some RAS initialization
1639 | * and forbid some RAS operations from the IPs.
1640 | * If software (say, a boot parameter) limits the RAS capability, we still
1641 | * need to allow the IPs to do some limited operations, like disable. In that
1642 | * case we initialize RAS as normal, but each function has to check whether
1643 | * the operation is allowed.
1644 | */
1645 | static void amdgpu_ras_check_supported(struct amdgpu_device *adev, | |
1646 | uint32_t *hw_supported, uint32_t *supported) | |
c030f2e4 | 1647 | { |
5caf466a | 1648 | *hw_supported = 0; |
1649 | *supported = 0; | |
c030f2e4 | 1650 | |
5caf466a | 1651 | if (amdgpu_sriov_vf(adev) || |
b404ae82 | 1652 | adev->asic_type != CHIP_VEGA20) |
5caf466a | 1653 | return; |
b404ae82 | 1654 | |
5d0f903f | 1655 | if (adev->is_atom_fw && |
1656 | (amdgpu_atomfirmware_mem_ecc_supported(adev) || | |
1657 | amdgpu_atomfirmware_sram_ecc_supported(adev))) | |
5caf466a | 1658 | *hw_supported = AMDGPU_RAS_BLOCK_MASK; |
b404ae82 | 1659 | |
5caf466a | 1660 | *supported = amdgpu_ras_enable == 0 ? |
1661 | 0 : *hw_supported & amdgpu_ras_mask; | |
c030f2e4 | 1662 | } |
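/*
 * Illustrative note, not part of this file: the two masks computed above are
 * normally steered from the kernel command line. The parameter names below
 * are the usual amdgpu module parameters backing amdgpu_ras_enable and
 * amdgpu_ras_mask; treat the exact names as an assumption.
 *
 *   amdgpu.ras_enable=0    - force *supported to 0, RAS is fully off
 *   amdgpu.ras_enable=1    - keep RAS on where the hardware reports support
 *   amdgpu.ras_mask=0x1    - only bit 0 (umc) survives the
 *                            "*hw_supported & amdgpu_ras_mask" filter
 */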
1663 | ||
1664 | int amdgpu_ras_init(struct amdgpu_device *adev) | |
1665 | { | |
1666 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
4e644fff | 1667 | int r; |
c030f2e4 | 1668 | |
b404ae82 | 1669 | if (con) |
c030f2e4 | 1670 | return 0; |
1671 | ||
1672 | con = kmalloc(sizeof(struct amdgpu_ras) + | |
1673 | sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT, | |
1674 | GFP_KERNEL|__GFP_ZERO); | |
1675 | if (!con) | |
1676 | return -ENOMEM; | |
1677 | ||
1678 | con->objs = (struct ras_manager *)(con + 1); | |
1679 | ||
1680 | amdgpu_ras_set_context(adev, con); | |
1681 | ||
5caf466a | 1682 | amdgpu_ras_check_supported(adev, &con->hw_supported, |
1683 | &con->supported); | |
fb2a3607 HZ |
1684 | if (!con->hw_supported) { |
1685 | amdgpu_ras_set_context(adev, NULL); | |
1686 | kfree(con); | |
1687 | return 0; | |
1688 | } | |
1689 | ||
c030f2e4 | 1690 | con->features = 0; |
1691 | INIT_LIST_HEAD(&con->head); | |
108c6a63 | 1692 | /* Might need to get this flag from the vbios. */
1693 | con->flags = RAS_DEFAULT_FLAGS; | |
c030f2e4 | 1694 | |
4e644fff HZ |
1695 | if (adev->nbio.funcs->init_ras_controller_interrupt) { |
1696 | r = adev->nbio.funcs->init_ras_controller_interrupt(adev); | |
1697 | if (r) | |
1698 | return r; | |
1699 | } | |
1700 | ||
1701 | if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) { | |
1702 | r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev); | |
1703 | if (r) | |
1704 | return r; | |
1705 | } | |
1706 | ||
c030f2e4 | 1707 | amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK; |
1708 | ||
c030f2e4 | 1709 | if (amdgpu_ras_fs_init(adev)) |
1710 | goto fs_out; | |
1711 | ||
5d0f903f | 1712 | DRM_INFO("RAS INFO: ras initialized successfully, " |
1713 | "hardware ability[%x] ras_mask[%x]\n", | |
1714 | con->hw_supported, con->supported); | |
c030f2e4 | 1715 | return 0; |
1716 | fs_out: | |
c030f2e4 | 1717 | amdgpu_ras_set_context(adev, NULL); |
1718 | kfree(con); | |
1719 | ||
1720 | return -EINVAL; | |
1721 | } | |
1722 | ||
b293e891 HZ |
1723 | /* helper function to handle the common tasks in the IP late init phase */
1724 | int amdgpu_ras_late_init(struct amdgpu_device *adev, | |
1725 | struct ras_common_if *ras_block, | |
1726 | struct ras_fs_if *fs_info, | |
1727 | struct ras_ih_if *ih_info) | |
1728 | { | |
1729 | int r; | |
1730 | ||
1731 | /* disable RAS feature per IP block if it is not supported */ | |
1732 | if (!amdgpu_ras_is_supported(adev, ras_block->block)) { | |
1733 | amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); | |
1734 | return 0; | |
1735 | } | |
1736 | ||
1737 | r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1); | |
1738 | if (r) { | |
1739 | if (r == -EAGAIN) { | |
1740 | /* request a gpu reset; this path will run again after the reset */
1741 | amdgpu_ras_request_reset_on_boot(adev, | |
1742 | ras_block->block); | |
1743 | return 0; | |
1744 | } else if (adev->in_suspend || adev->in_gpu_reset) { | |
1745 | /* if we fail to enable ras in the resume phase,
1746 | * clean up all ras fs nodes and disable ras */
1747 | goto cleanup; | |
1748 | } else | |
1749 | return r; | |
1750 | } | |
1751 | ||
1752 | /* in resume phase, no need to create ras fs node */ | |
1753 | if (adev->in_suspend || adev->in_gpu_reset) | |
1754 | return 0; | |
1755 | ||
1756 | if (ih_info->cb) { | |
1757 | r = amdgpu_ras_interrupt_add_handler(adev, ih_info); | |
1758 | if (r) | |
1759 | goto interrupt; | |
1760 | } | |
1761 | ||
1762 | amdgpu_ras_debugfs_create(adev, fs_info); | |
1763 | ||
1764 | r = amdgpu_ras_sysfs_create(adev, fs_info); | |
1765 | if (r) | |
1766 | goto sysfs; | |
1767 | ||
1768 | return 0; | |
1769 | cleanup: | |
1770 | amdgpu_ras_sysfs_remove(adev, ras_block); | |
1771 | sysfs: | |
1772 | amdgpu_ras_debugfs_remove(adev, ras_block); | |
1773 | if (ih_info->cb) | |
1774 | amdgpu_ras_interrupt_remove_handler(adev, ih_info); | |
1775 | interrupt: | |
1776 | amdgpu_ras_feature_enable(adev, ras_block, 0); | |
1777 | return r; | |
1778 | } | |
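/*
 * A minimal usage sketch (assumed, not taken from any IP block in this file):
 * an IP's ->late_init hook fills in the three descriptors and delegates to
 * amdgpu_ras_late_init() above. The callback, sysfs and debugfs names are
 * hypothetical; real IP blocks typically keep the ras_common_if in adev so it
 * outlives this call.
 */
static int example_ip_ras_late_init(struct amdgpu_device *adev)
{
        struct ras_common_if head = {
                .block = AMDGPU_RAS_BLOCK__GFX,
                .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
                .sub_block_index = 0,
                .name = "gfx",
        };
        struct ras_fs_if fs_info = {
                .head = head,
                .sysfs_name = "gfx_err_count",
                .debugfs_name = "gfx_err_inject",
        };
        struct ras_ih_if ih_info = {
                .head = head,
                .cb = example_gfx_process_ras_data_cb, /* hypothetical handler */
        };

        /* creates the fs nodes, registers the IH callback, enables the feature */
        return amdgpu_ras_late_init(adev, &head, &fs_info, &ih_info);
}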
1779 | ||
1780 | /* helper function to remove the ras fs nodes and the interrupt handler */
1781 | void amdgpu_ras_late_fini(struct amdgpu_device *adev, | |
1782 | struct ras_common_if *ras_block, | |
1783 | struct ras_ih_if *ih_info) | |
1784 | { | |
1785 | if (!ras_block || !ih_info) | |
1786 | return; | |
1787 | ||
1788 | amdgpu_ras_sysfs_remove(adev, ras_block); | |
1789 | amdgpu_ras_debugfs_remove(adev, ras_block); | |
1790 | if (ih_info->cb) | |
1791 | amdgpu_ras_interrupt_remove_handler(adev, ih_info); | |
1792 | amdgpu_ras_feature_enable(adev, ras_block, 0); | |
1793 | } | |
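/*
 * Matching teardown sketch (again assumed, not from this file): the IP hands
 * the same descriptors back so that the fs nodes, the interrupt handler and
 * the feature are all undone in one call.
 */
static void example_ip_ras_late_fini(struct amdgpu_device *adev,
                                     struct ras_common_if *ras_if,
                                     struct ras_ih_if *ih_info)
{
        /* removes the sysfs/debugfs nodes and the IH callback, disables the feature */
        amdgpu_ras_late_fini(adev, ras_if, ih_info);
        kfree(ras_if);  /* assuming the IP kmalloc'ed its descriptor */
}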
1794 | ||
a564808e | 1795 | /* Do some init work after IP late init, since it depends on the IPs,
511fdbc3 | 1796 | * and it runs in the resume, gpu reset and boot-up cases.
a564808e | 1797 | */
511fdbc3 | 1798 | void amdgpu_ras_resume(struct amdgpu_device *adev) |
108c6a63 | 1799 | { |
1800 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1801 | struct ras_manager *obj, *tmp; | |
1802 | ||
1803 | if (!con) | |
1804 | return; | |
1805 | ||
108c6a63 | 1806 | if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { |
191051a1 | 1807 | /* Set up all the other IPs, which are not implemented yet. The
1808 | * tricky part is that an IP's actual ras error type should be
1809 | * MULTI_UNCORRECTABLE, but since the driver does not handle it,
1810 | * ERROR_NONE makes sense anyway.
1811 | */
1812 | amdgpu_ras_enable_all_features(adev, 1); | |
1813 | ||
1814 | /* We enable ras on all hw_supported blocks, but the boot
1815 | * parameter might disable some of them, and one or more IPs may
1816 | * not be implemented yet. So we disable those on their behalf.
1817 | */
108c6a63 | 1818 | list_for_each_entry_safe(obj, tmp, &con->head, node) { |
1819 | if (!amdgpu_ras_is_supported(adev, obj->head.block)) { | |
1820 | amdgpu_ras_feature_enable(adev, &obj->head, 0); | |
1821 | /* there should not be any reference left. */
1822 | WARN_ON(alive_obj(obj)); | |
1823 | } | |
191051a1 | 1824 | } |
108c6a63 | 1825 | } |
a564808e | 1826 | |
1827 | if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) { | |
1828 | con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET; | |
1829 | /* Set up the ras obj state as disabled
1830 | * for the init_by_vbios case.
1831 | * If we want to enable ras, just enable it in the normal way.
1832 | * If we want to disable it, we need to set the ras obj up as
1833 | * enabled first, then issue another TA disable cmd.
1834 | * See feature_enable_on_boot.
1835 | */
1836 | amdgpu_ras_disable_all_features(adev, 1); | |
1837 | amdgpu_ras_reset_gpu(adev, 0); | |
1838 | } | |
108c6a63 | 1839 | } |
1840 | ||
511fdbc3 | 1841 | void amdgpu_ras_suspend(struct amdgpu_device *adev) |
1842 | { | |
1843 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1844 | ||
1845 | if (!con) | |
1846 | return; | |
1847 | ||
1848 | amdgpu_ras_disable_all_features(adev, 0); | |
1849 | /* Make sure all ras objects are disabled. */ | |
1850 | if (con->features) | |
1851 | amdgpu_ras_disable_all_features(adev, 1); | |
1852 | } | |
1853 | ||
c030f2e4 | 1854 | /* do some fini work before IP fini, since it depends on the IPs */
1855 | int amdgpu_ras_pre_fini(struct amdgpu_device *adev) | |
1856 | { | |
1857 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1858 | ||
1859 | if (!con) | |
1860 | return 0; | |
1861 | ||
1862 | /* Need to disable ras on all IPs here before ip [hw/sw]fini */
1863 | amdgpu_ras_disable_all_features(adev, 0); | |
1864 | amdgpu_ras_recovery_fini(adev); | |
1865 | return 0; | |
1866 | } | |
1867 | ||
1868 | int amdgpu_ras_fini(struct amdgpu_device *adev) | |
1869 | { | |
1870 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1871 | ||
1872 | if (!con) | |
1873 | return 0; | |
1874 | ||
1875 | amdgpu_ras_fs_fini(adev); | |
1876 | amdgpu_ras_interrupt_remove_all(adev); | |
1877 | ||
1878 | WARN(con->features, "Feature mask is not cleared"); | |
1879 | ||
1880 | if (con->features) | |
1881 | amdgpu_ras_disable_all_features(adev, 1); | |
1882 | ||
1883 | amdgpu_ras_set_context(adev, NULL); | |
1884 | kfree(con); | |
1885 | ||
1886 | return 0; | |
1887 | } | |
7c6e68c7 AG |
1888 | |
1889 | void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) | |
1890 | { | |
ed606f8a AG |
1891 | uint32_t hw_supported, supported; |
1892 | ||
1893 | amdgpu_ras_check_supported(adev, &hw_supported, &supported); | |
1894 | if (!hw_supported) | |
1895 | return; | |
1896 | ||
7c6e68c7 | 1897 | if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { |
d5ea093e AG |
1898 | DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n"); |
1899 | ||
1900 | amdgpu_ras_reset_gpu(adev, false); | |
7c6e68c7 AG |
1901 | } |
1902 | } |
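/*
 * A minimal caller sketch (assumed, not from this file): a fatal-error
 * interrupt handler in the NBIO code is expected to forward the event here;
 * the atomic_cmpxchg() above makes sure the recovery path is kicked off only
 * once even if several interrupts race.
 */
static void example_nbio_fatal_error_handler(struct amdgpu_device *adev)
{
        /* an uncorrectable/fatal RAS event was signalled by the hardware */
        amdgpu_ras_global_ras_isr(adev);
}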