Commit | Line | Data |
---|---|---|
c030f2e4 | 1 | /* |
2 | * Copyright 2018 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * | |
23 | */ | |
24 | #include <linux/debugfs.h> | |
25 | #include <linux/list.h> | |
26 | #include <linux/module.h> | |
f867723b | 27 | #include <linux/uaccess.h> |
7c6e68c7 AG |
28 | #include <linux/reboot.h> |
29 | #include <linux/syscalls.h> | |
f867723b | 30 | |
c030f2e4 | 31 | #include "amdgpu.h" |
32 | #include "amdgpu_ras.h" | |
b404ae82 | 33 | #include "amdgpu_atomfirmware.h" |
19744f5f | 34 | #include "amdgpu_xgmi.h" |
4e644fff | 35 | #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" |
c030f2e4 | 36 | |
c030f2e4 | 37 | const char *ras_error_string[] = { |
38 | "none", | |
39 | "parity", | |
40 | "single_correctable", | |
41 | "multi_uncorrectable", | |
42 | "poison", | |
43 | }; | |
44 | ||
45 | const char *ras_block_string[] = { | |
46 | "umc", | |
47 | "sdma", | |
48 | "gfx", | |
49 | "mmhub", | |
50 | "athub", | |
51 | "pcie_bif", | |
52 | "hdp", | |
53 | "xgmi_wafl", | |
54 | "df", | |
55 | "smn", | |
56 | "sem", | |
57 | "mp0", | |
58 | "mp1", | |
59 | "fuse", | |
60 | }; | |
61 | ||
62 | #define ras_err_str(i) (ras_error_string[ffs(i)]) | |
63 | #define ras_block_str(i) (ras_block_string[i]) | |
64 | ||
a564808e | 65 | #define AMDGPU_RAS_FLAG_INIT_BY_VBIOS 1 |
66 | #define AMDGPU_RAS_FLAG_INIT_NEED_RESET 2 | |
108c6a63 | 67 | #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) |
68 | ||
7cdc2ee3 TZ |
69 | /* inject address is 52 bits */ |
70 | #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) | |
71 | ||
52dd95f2 GC |
72 | enum amdgpu_ras_retire_page_reservation { |
73 | AMDGPU_RAS_RETIRE_PAGE_RESERVED, | |
74 | AMDGPU_RAS_RETIRE_PAGE_PENDING, | |
75 | AMDGPU_RAS_RETIRE_PAGE_FAULT, | |
76 | }; | |
7c6e68c7 AG |
77 | |
78 | atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0); | |
79 | ||
6e4be987 TZ |
80 | static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, |
81 | uint64_t addr); | |
82 | ||
c030f2e4 | 83 | static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, |
84 | size_t size, loff_t *pos) | |
85 | { | |
86 | struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private; | |
87 | struct ras_query_if info = { | |
88 | .head = obj->head, | |
89 | }; | |
90 | ssize_t s; | |
91 | char val[128]; | |
92 | ||
93 | if (amdgpu_ras_error_query(obj->adev, &info)) | |
94 | return -EINVAL; | |
95 | ||
96 | s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n", | |
97 | "ue", info.ue_count, | |
98 | "ce", info.ce_count); | |
99 | if (*pos >= s) | |
100 | return 0; | |
101 | ||
102 | s -= *pos; | |
103 | s = min_t(u64, s, size); | |
104 | ||
105 | ||
106 | if (copy_to_user(buf, &val[*pos], s)) | |
107 | return -EINVAL; | |
108 | ||
109 | *pos += s; | |
110 | ||
111 | return s; | |
112 | } | |
113 | ||
c030f2e4 | 114 | static const struct file_operations amdgpu_ras_debugfs_ops = { |
115 | .owner = THIS_MODULE, | |
116 | .read = amdgpu_ras_debugfs_read, | |
190211ab | 117 | .write = NULL, |
c030f2e4 | 118 | .llseek = default_llseek |
119 | }; | |
120 | ||
96ebb307 | 121 | static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id) |
122 | { | |
123 | int i; | |
124 | ||
125 | for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) { | |
126 | *block_id = i; | |
127 | if (strcmp(name, ras_block_str(i)) == 0) | |
128 | return 0; | |
129 | } | |
130 | return -EINVAL; | |
131 | } | |
132 | ||
133 | static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, | |
134 | const char __user *buf, size_t size, | |
135 | loff_t *pos, struct ras_debug_if *data) | |
136 | { | |
137 | ssize_t s = min_t(u64, 64, size); | |
138 | char str[65]; | |
139 | char block_name[33]; | |
140 | char err[9] = "ue"; | |
141 | int op = -1; | |
142 | int block_id; | |
44494f96 | 143 | uint32_t sub_block; |
96ebb307 | 144 | u64 address, value; |
145 | ||
146 | if (*pos) | |
147 | return -EINVAL; | |
148 | *pos = size; | |
149 | ||
150 | memset(str, 0, sizeof(str)); | |
151 | memset(data, 0, sizeof(*data)); | |
152 | ||
153 | if (copy_from_user(str, buf, s)) | |
154 | return -EINVAL; | |
155 | ||
156 | if (sscanf(str, "disable %32s", block_name) == 1) | |
157 | op = 0; | |
158 | else if (sscanf(str, "enable %32s %8s", block_name, err) == 2) | |
159 | op = 1; | |
160 | else if (sscanf(str, "inject %32s %8s", block_name, err) == 2) | |
161 | op = 2; | |
b076296b | 162 | else if (str[0] && str[1] && str[2] && str[3]) |
96ebb307 | 163 | /* ascii string, but commands are not matched. */ |
164 | return -EINVAL; | |
165 | ||
166 | if (op != -1) { | |
167 | if (amdgpu_ras_find_block_id_by_name(block_name, &block_id)) | |
168 | return -EINVAL; | |
169 | ||
170 | data->head.block = block_id; | |
e1063493 TZ |
171 | /* only ue and ce errors are supported */ |
172 | if (!memcmp("ue", err, 2)) | |
173 | data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; | |
174 | else if (!memcmp("ce", err, 2)) | |
175 | data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE; | |
176 | else | |
177 | return -EINVAL; | |
178 | ||
96ebb307 | 179 | data->op = op; |
180 | ||
181 | if (op == 2) { | |
44494f96 TZ |
182 | if (sscanf(str, "%*s %*s %*s %u %llu %llu", |
183 | &sub_block, &address, &value) != 3) | |
184 | if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", | |
185 | &sub_block, &address, &value) != 3) | |
96ebb307 | 186 | return -EINVAL; |
44494f96 | 187 | data->head.sub_block_index = sub_block; |
96ebb307 | 188 | data->inject.address = address; |
189 | data->inject.value = value; | |
190 | } | |
191 | } else { | |
73aa8e1a | 192 | if (size < sizeof(*data)) |
96ebb307 | 193 | return -EINVAL; |
194 | ||
195 | if (copy_from_user(data, buf, sizeof(*data))) | |
196 | return -EINVAL; | |
197 | } | |
198 | ||
199 | return 0; | |
200 | } | |
7c6e68c7 | 201 | |
74abc221 TSD |
202 | /** |
203 | * DOC: AMDGPU RAS debugfs control interface | |
36ea1bd2 | 204 | * |
205 | * It accepts struct ras_debug_if, which has two members. | |
206 | * | |
207 | * First member: ras_debug_if::head or ras_debug_if::inject. | |
96ebb307 | 208 | * |
209 | * head is used to indicate which IP block will be under control. | |
36ea1bd2 | 210 | * |
211 | * head has four members, they are block, type, sub_block_index, name. | |
212 | * block: which IP will be under control. | |
213 | * type: what kind of error will be enabled/disabled/injected. | |
214 | * sub_block_index: some IPs have subcomponents, e.g. GFX, SDMA. | |
215 | * name: the name of IP. | |
216 | * | |
217 | * inject has two more members than head, they are address, value. | |
218 | * As their names indicate, inject operation will write the | |
219 | * value to the address. | |
220 | * | |
ef177d11 | 221 | * The second member: struct ras_debug_if::op. |
c688a06b | 222 | * It has three kinds of operations. |
879e723d AZ |
223 | * |
224 | * - 0: disable RAS on the block. Take ::head as its data. | |
225 | * - 1: enable RAS on the block. Take ::head as its data. | |
226 | * - 2: inject errors on the block. Take ::inject as its data. | |
36ea1bd2 | 227 | * |
96ebb307 | 228 | * How to use the interface? |
ef177d11 AD |
229 | * |
230 | * Programs | |
231 | * | |
232 | * Copy struct ras_debug_if into your code and initialize it, | |
233 | * then write the struct to the control node (see the sketch after this comment block). | |
234 | * | |
235 | * Shells | |
96ebb307 | 236 | * |
879e723d AZ |
237 | * .. code-block:: bash |
238 | * | |
a20bfd0f | 239 | * echo op block [error [sub_block address value]] > .../ras/ras_ctrl |
879e723d | 240 | * |
ef177d11 AD |
241 | * Parameters: |
242 | * | |
879e723d AZ |
243 | * op: disable, enable, inject |
244 | * disable: only block is needed | |
245 | * enable: block and error are needed | |
246 | * inject: error, address, value are needed | |
a20bfd0f | 247 | * block: umc, sdma, gfx, ......... |
879e723d AZ |
248 | * see ras_block_string[] for details |
249 | * error: ue, ce | |
250 | * ue: multi_uncorrectable | |
251 | * ce: single_correctable | |
252 | * sub_block: | |
253 | * sub block index, pass 0 if there is no sub block | |
254 | * | |
255 | * Here are some examples of bash commands: | |
256 | * | |
257 | * .. code-block:: bash | |
96ebb307 | 258 | * |
44494f96 TZ |
259 | * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl |
260 | * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl | |
96ebb307 | 261 | * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl |
262 | * | |
36ea1bd2 | 263 | * How to check the result? |
264 | * | |
265 | * For disable/enable, please check ras features at | |
266 | * /sys/class/drm/card[0/1/2...]/device/ras/features | |
267 | * | |
268 | * For inject, please check corresponding err count at | |
269 | * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count | |
270 | * | |
879e723d | 271 | * .. note:: |
ef177d11 | 272 | * Operations are only allowed on blocks which are supported. |
879e723d | 273 | * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask |
ef177d11 AD |
274 | * to see which blocks support RAS on a particular asic. |
275 | * | |
36ea1bd2 | 276 | */ |
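As a hedged illustration of the "Programs" path described in the comment block above, the user-space sketch below opens the debugfs control node and writes one of the documented text commands. The node path and command syntax come straight from the examples above; the chosen command and the error handling are illustrative only.

```c
/* Illustrative user-space sketch, not part of the driver: disable RAS on the
 * umc block by writing a documented text command to the debugfs control node.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* format: op block [error [sub_block address value]] */
	const char *cmd = "disable umc";
	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);

	if (fd < 0) {
		perror("open ras_ctrl");
		return 1;
	}
	/* amdgpu_ras_debugfs_ctrl_parse_data() parses this string in the kernel */
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write ras_ctrl");
	close(fd);
	return 0;
}
```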
277 | static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf, | |
278 | size_t size, loff_t *pos) | |
279 | { | |
280 | struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; | |
281 | struct ras_debug_if data; | |
282 | int ret = 0; | |
283 | ||
43c4d576 JC |
284 | if (amdgpu_ras_intr_triggered()) { |
285 | DRM_WARN("RAS WARN: error injection currently inaccessible\n"); | |
286 | return size; | |
287 | } | |
288 | ||
96ebb307 | 289 | ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data); |
290 | if (ret) | |
36ea1bd2 | 291 | return -EINVAL; |
292 | ||
36ea1bd2 | 293 | if (!amdgpu_ras_is_supported(adev, data.head.block)) |
294 | return -EINVAL; | |
295 | ||
296 | switch (data.op) { | |
297 | case 0: | |
298 | ret = amdgpu_ras_feature_enable(adev, &data.head, 0); | |
299 | break; | |
300 | case 1: | |
301 | ret = amdgpu_ras_feature_enable(adev, &data.head, 1); | |
302 | break; | |
303 | case 2: | |
7cdc2ee3 TZ |
304 | if ((data.inject.address >= adev->gmc.mc_vram_size) || |
305 | (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) { | |
306 | ret = -EINVAL; | |
307 | break; | |
308 | } | |
309 | ||
6e4be987 TZ |
310 | /* umc ce/ue error injection for a bad page is not allowed */ |
311 | if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) && | |
312 | amdgpu_ras_check_bad_page(adev, data.inject.address)) { | |
313 | DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n", | |
314 | data.inject.address); | |
315 | break; | |
316 | } | |
317 | ||
7cdc2ee3 | 318 | /* data.inject.address is offset instead of absolute gpu address */ |
36ea1bd2 | 319 | ret = amdgpu_ras_error_inject(adev, &data.inject); |
320 | break; | |
96ebb307 | 321 | default: |
322 | ret = -EINVAL; | |
323 | break; | |
374bf7bd | 324 | } |
36ea1bd2 | 325 | |
326 | if (ret) | |
327 | return -EINVAL; | |
328 | ||
329 | return size; | |
330 | } | |
331 | ||
084fe13b AG |
332 | /** |
333 | * DOC: AMDGPU RAS debugfs EEPROM table reset interface | |
334 | * | |
f77c7109 | 335 | * Some boards contain an EEPROM which is used to persistently store a list of |
ef177d11 | 336 | * bad pages which experiences ECC errors in vram. This interface provides |
f77c7109 AD |
337 | * a way to reset the EEPROM, e.g., after testing error injection. |
338 | * | |
339 | * Usage: | |
340 | * | |
341 | * .. code-block:: bash | |
342 | * | |
343 | * echo 1 > ../ras/ras_eeprom_reset | |
344 | * | |
345 | * will reset EEPROM table to 0 entries. | |
346 | * | |
084fe13b AG |
347 | */ |
348 | static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf, | |
349 | size_t size, loff_t *pos) | |
350 | { | |
351 | struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; | |
352 | int ret; | |
353 | ||
354 | ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control); | |
355 | ||
356 | return ret == 1 ? size : -EIO; | |
357 | } | |
358 | ||
36ea1bd2 | 359 | static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { |
360 | .owner = THIS_MODULE, | |
361 | .read = NULL, | |
362 | .write = amdgpu_ras_debugfs_ctrl_write, | |
363 | .llseek = default_llseek | |
364 | }; | |
365 | ||
084fe13b AG |
366 | static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = { |
367 | .owner = THIS_MODULE, | |
368 | .read = NULL, | |
369 | .write = amdgpu_ras_debugfs_eeprom_write, | |
370 | .llseek = default_llseek | |
371 | }; | |
372 | ||
f77c7109 AD |
373 | /** |
374 | * DOC: AMDGPU RAS sysfs Error Count Interface | |
375 | * | |
ef177d11 | 376 | * It allows the user to read the error count for each IP block on the gpu through |
f77c7109 AD |
377 | * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count |
378 | * | |
379 | * It outputs multiple lines which report the uncorrected (ue) and corrected | |
380 | * (ce) error counts. | |
381 | * | |
382 | * The format of one line is below, | |
383 | * | |
384 | * [ce|ue]: count | |
385 | * | |
386 | * Example: | |
387 | * | |
388 | * .. code-block:: bash | |
389 | * | |
390 | * ue: 0 | |
391 | * ce: 1 | |
392 | * | |
393 | */ | |
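A matching user-space reader for the sysfs node documented above, as a minimal sketch; the card index (0) and block name (gfx) are placeholders for whichever card and RAS-capable block is being queried.

```c
/* Illustrative user-space sketch, not part of the driver: print the "ue:"/"ce:"
 * lines exposed by a <block>_err_count sysfs node. card0 and gfx are placeholders.
 */
#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/sys/class/drm/card0/device/ras/gfx_err_count", "r");

	if (!f) {
		perror("fopen gfx_err_count");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
```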
c030f2e4 | 394 | static ssize_t amdgpu_ras_sysfs_read(struct device *dev, |
395 | struct device_attribute *attr, char *buf) | |
396 | { | |
397 | struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr); | |
398 | struct ras_query_if info = { | |
399 | .head = obj->head, | |
400 | }; | |
401 | ||
43c4d576 JC |
402 | if (amdgpu_ras_intr_triggered()) |
403 | return snprintf(buf, PAGE_SIZE, | |
404 | "Query currently inaccessible\n"); | |
405 | ||
c030f2e4 | 406 | if (amdgpu_ras_error_query(obj->adev, &info)) |
407 | return -EINVAL; | |
408 | ||
409 | return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n", | |
410 | "ue", info.ue_count, | |
411 | "ce", info.ce_count); | |
412 | } | |
413 | ||
414 | /* obj begin */ | |
415 | ||
416 | #define get_obj(obj) do { (obj)->use++; } while (0) | |
417 | #define alive_obj(obj) ((obj)->use) | |
418 | ||
419 | static inline void put_obj(struct ras_manager *obj) | |
420 | { | |
421 | if (obj && --obj->use == 0) | |
422 | list_del(&obj->node); | |
423 | if (obj && obj->use < 0) { | |
424 | DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name); | |
425 | } | |
426 | } | |
427 | ||
428 | /* make one obj and return it. */ | |
429 | static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, | |
430 | struct ras_common_if *head) | |
431 | { | |
432 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
433 | struct ras_manager *obj; | |
434 | ||
435 | if (!con) | |
436 | return NULL; | |
437 | ||
438 | if (head->block >= AMDGPU_RAS_BLOCK_COUNT) | |
439 | return NULL; | |
440 | ||
441 | obj = &con->objs[head->block]; | |
442 | /* already exists. return obj? */ | |
443 | if (alive_obj(obj)) | |
444 | return NULL; | |
445 | ||
446 | obj->head = *head; | |
447 | obj->adev = adev; | |
448 | list_add(&obj->node, &con->head); | |
449 | get_obj(obj); | |
450 | ||
451 | return obj; | |
452 | } | |
453 | ||
454 | /* return an obj equal to head, or the first when head is NULL */ | |
f2a79be1 | 455 | struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, |
c030f2e4 | 456 | struct ras_common_if *head) |
457 | { | |
458 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
459 | struct ras_manager *obj; | |
460 | int i; | |
461 | ||
462 | if (!con) | |
463 | return NULL; | |
464 | ||
465 | if (head) { | |
466 | if (head->block >= AMDGPU_RAS_BLOCK_COUNT) | |
467 | return NULL; | |
468 | ||
469 | obj = &con->objs[head->block]; | |
470 | ||
471 | if (alive_obj(obj)) { | |
472 | WARN_ON(head->block != obj->head.block); | |
473 | return obj; | |
474 | } | |
475 | } else { | |
476 | for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { | |
477 | obj = &con->objs[i]; | |
478 | if (alive_obj(obj)) { | |
479 | WARN_ON(i != obj->head.block); | |
480 | return obj; | |
481 | } | |
482 | } | |
483 | } | |
484 | ||
485 | return NULL; | |
486 | } | |
487 | /* obj end */ | |
488 | ||
489 | /* feature ctl begin */ | |
490 | static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev, | |
491 | struct ras_common_if *head) | |
492 | { | |
5caf466a | 493 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
494 | ||
495 | return con->hw_supported & BIT(head->block); | |
c030f2e4 | 496 | } |
497 | ||
498 | static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev, | |
499 | struct ras_common_if *head) | |
500 | { | |
501 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
502 | ||
503 | return con->features & BIT(head->block); | |
504 | } | |
505 | ||
506 | /* | |
507 | * if obj is not created, then create one. | |
508 | * set feature enable flag. | |
509 | */ | |
510 | static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev, | |
511 | struct ras_common_if *head, int enable) | |
512 | { | |
513 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
514 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); | |
515 | ||
5caf466a | 516 | /* If hardware does not support ras, then do not create obj. |
517 | * But if hardware supports ras, we can create the obj. | |
518 | * Ras framework checks con->hw_supported to see if it needs to do | |
519 | * corresponding initialization. | |
520 | * IP checks con->support to see if it needs to disable ras. | |
521 | */ | |
c030f2e4 | 522 | if (!amdgpu_ras_is_feature_allowed(adev, head)) |
523 | return 0; | |
524 | if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) | |
525 | return 0; | |
526 | ||
527 | if (enable) { | |
528 | if (!obj) { | |
529 | obj = amdgpu_ras_create_obj(adev, head); | |
530 | if (!obj) | |
531 | return -EINVAL; | |
532 | } else { | |
533 | /* In case we create obj somewhere else */ | |
534 | get_obj(obj); | |
535 | } | |
536 | con->features |= BIT(head->block); | |
537 | } else { | |
538 | if (obj && amdgpu_ras_is_feature_enabled(adev, head)) { | |
539 | con->features &= ~BIT(head->block); | |
540 | put_obj(obj); | |
541 | } | |
542 | } | |
543 | ||
544 | return 0; | |
545 | } | |
546 | ||
547 | /* wrapper of psp_ras_enable_features */ | |
548 | int amdgpu_ras_feature_enable(struct amdgpu_device *adev, | |
549 | struct ras_common_if *head, bool enable) | |
550 | { | |
551 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
552 | union ta_ras_cmd_input info; | |
553 | int ret; | |
554 | ||
555 | if (!con) | |
556 | return -EINVAL; | |
557 | ||
558 | if (!enable) { | |
559 | info.disable_features = (struct ta_ras_disable_features_input) { | |
828cfa29 | 560 | .block_id = amdgpu_ras_block_to_ta(head->block), |
561 | .error_type = amdgpu_ras_error_to_ta(head->type), | |
c030f2e4 | 562 | }; |
563 | } else { | |
564 | info.enable_features = (struct ta_ras_enable_features_input) { | |
828cfa29 | 565 | .block_id = amdgpu_ras_block_to_ta(head->block), |
566 | .error_type = amdgpu_ras_error_to_ta(head->type), | |
c030f2e4 | 567 | }; |
568 | } | |
569 | ||
570 | /* Do not enable if it is not allowed. */ | |
571 | WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head)); | |
572 | /* Are we already in the state we are going to set? */ | |
573 | if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) | |
574 | return 0; | |
575 | ||
bff77e86 LM |
576 | if (!amdgpu_ras_intr_triggered()) { |
577 | ret = psp_ras_enable_features(&adev->psp, &info, enable); | |
578 | if (ret) { | |
579 | DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n", | |
580 | enable ? "enable":"disable", | |
581 | ras_block_str(head->block), | |
582 | ret); | |
583 | if (ret == TA_RAS_STATUS__RESET_NEEDED) | |
584 | return -EAGAIN; | |
585 | return -EINVAL; | |
586 | } | |
c030f2e4 | 587 | } |
588 | ||
589 | /* setup the obj */ | |
590 | __amdgpu_ras_feature_enable(adev, head, enable); | |
591 | ||
592 | return 0; | |
593 | } | |
594 | ||
77de502b | 595 | /* Only used in device probe stage and called only once. */ |
596 | int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, | |
597 | struct ras_common_if *head, bool enable) | |
598 | { | |
599 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
600 | int ret; | |
601 | ||
602 | if (!con) | |
603 | return -EINVAL; | |
604 | ||
605 | if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { | |
7af23ebe | 606 | if (enable) { |
607 | /* There is no harm in issuing a ras TA cmd regardless of | |
608 | * the current ras state. | |
609 | * If current state == target state, it will do nothing. | |
610 | * But sometimes it requests the driver to reset and repost | |
611 | * with error code -EAGAIN. | |
612 | */ | |
613 | ret = amdgpu_ras_feature_enable(adev, head, 1); | |
614 | /* With old ras TA, we might fail to enable ras. | |
615 | * Log it and just setup the object. | |
616 | * TODO: remove this WA in the future. | |
617 | */ | |
618 | if (ret == -EINVAL) { | |
619 | ret = __amdgpu_ras_feature_enable(adev, head, 1); | |
620 | if (!ret) | |
621 | DRM_INFO("RAS INFO: %s setup object\n", | |
622 | ras_block_str(head->block)); | |
623 | } | |
624 | } else { | |
625 | /* setup the object then issue a ras TA disable cmd.*/ | |
626 | ret = __amdgpu_ras_feature_enable(adev, head, 1); | |
627 | if (ret) | |
628 | return ret; | |
77de502b | 629 | |
77de502b | 630 | ret = amdgpu_ras_feature_enable(adev, head, 0); |
7af23ebe | 631 | } |
77de502b | 632 | } else |
633 | ret = amdgpu_ras_feature_enable(adev, head, enable); | |
634 | ||
635 | return ret; | |
636 | } | |
637 | ||
c030f2e4 | 638 | static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev, |
639 | bool bypass) | |
640 | { | |
641 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
642 | struct ras_manager *obj, *tmp; | |
643 | ||
644 | list_for_each_entry_safe(obj, tmp, &con->head, node) { | |
645 | /* bypass psp. | |
646 | * aka just release the obj and corresponding flags | |
647 | */ | |
648 | if (bypass) { | |
649 | if (__amdgpu_ras_feature_enable(adev, &obj->head, 0)) | |
650 | break; | |
651 | } else { | |
652 | if (amdgpu_ras_feature_enable(adev, &obj->head, 0)) | |
653 | break; | |
654 | } | |
289d513b | 655 | } |
c030f2e4 | 656 | |
657 | return con->features; | |
658 | } | |
659 | ||
660 | static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, | |
661 | bool bypass) | |
662 | { | |
663 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
664 | int ras_block_count = AMDGPU_RAS_BLOCK_COUNT; | |
665 | int i; | |
191051a1 | 666 | const enum amdgpu_ras_error_type default_ras_type = |
667 | AMDGPU_RAS_ERROR__NONE; | |
c030f2e4 | 668 | |
669 | for (i = 0; i < ras_block_count; i++) { | |
670 | struct ras_common_if head = { | |
671 | .block = i, | |
191051a1 | 672 | .type = default_ras_type, |
c030f2e4 | 673 | .sub_block_index = 0, |
674 | }; | |
675 | strcpy(head.name, ras_block_str(i)); | |
676 | if (bypass) { | |
677 | /* | |
678 | * bypass psp. vbios enables ras for us. | |
679 | * so just create the obj | |
680 | */ | |
681 | if (__amdgpu_ras_feature_enable(adev, &head, 1)) | |
682 | break; | |
683 | } else { | |
684 | if (amdgpu_ras_feature_enable(adev, &head, 1)) | |
685 | break; | |
686 | } | |
289d513b | 687 | } |
c030f2e4 | 688 | |
689 | return con->features; | |
690 | } | |
691 | /* feature ctl end */ | |
692 | ||
693 | /* query/inject/cure begin */ | |
694 | int amdgpu_ras_error_query(struct amdgpu_device *adev, | |
695 | struct ras_query_if *info) | |
696 | { | |
697 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); | |
6f102dba | 698 | struct ras_err_data err_data = {0, 0, 0, NULL}; |
3e81ee9a | 699 | int i; |
c030f2e4 | 700 | |
701 | if (!obj) | |
702 | return -EINVAL; | |
c030f2e4 | 703 | |
939e2258 HZ |
704 | switch (info->head.block) { |
705 | case AMDGPU_RAS_BLOCK__UMC: | |
045c0216 TZ |
706 | if (adev->umc.funcs->query_ras_error_count) |
707 | adev->umc.funcs->query_ras_error_count(adev, &err_data); | |
13b7c46c TZ |
708 | /* umc query_ras_error_address is also responsible for clearing |
709 | * error status | |
710 | */ | |
711 | if (adev->umc.funcs->query_ras_error_address) | |
712 | adev->umc.funcs->query_ras_error_address(adev, &err_data); | |
939e2258 | 713 | break; |
3e81ee9a HZ |
714 | case AMDGPU_RAS_BLOCK__SDMA: |
715 | if (adev->sdma.funcs->query_ras_error_count) { | |
716 | for (i = 0; i < adev->sdma.num_instances; i++) | |
717 | adev->sdma.funcs->query_ras_error_count(adev, i, | |
718 | &err_data); | |
719 | } | |
720 | break; | |
83b0582c DL |
721 | case AMDGPU_RAS_BLOCK__GFX: |
722 | if (adev->gfx.funcs->query_ras_error_count) | |
723 | adev->gfx.funcs->query_ras_error_count(adev, &err_data); | |
724 | break; | |
9fb2d8de | 725 | case AMDGPU_RAS_BLOCK__MMHUB: |
d65bf1f8 TZ |
726 | if (adev->mmhub.funcs->query_ras_error_count) |
727 | adev->mmhub.funcs->query_ras_error_count(adev, &err_data); | |
9fb2d8de | 728 | break; |
d7bd680d GC |
729 | case AMDGPU_RAS_BLOCK__PCIE_BIF: |
730 | if (adev->nbio.funcs->query_ras_error_count) | |
731 | adev->nbio.funcs->query_ras_error_count(adev, &err_data); | |
732 | break; | |
ec01fe2d HZ |
733 | case AMDGPU_RAS_BLOCK__XGMI_WAFL: |
734 | amdgpu_xgmi_query_ras_error_count(adev, &err_data); | |
735 | break; | |
939e2258 HZ |
736 | default: |
737 | break; | |
738 | } | |
05a58345 TZ |
739 | |
740 | obj->err_data.ue_count += err_data.ue_count; | |
741 | obj->err_data.ce_count += err_data.ce_count; | |
742 | ||
c030f2e4 | 743 | info->ue_count = obj->err_data.ue_count; |
744 | info->ce_count = obj->err_data.ce_count; | |
745 | ||
7c6e68c7 | 746 | if (err_data.ce_count) { |
05a58345 TZ |
747 | dev_info(adev->dev, "%ld correctable errors detected in %s block\n", |
748 | obj->err_data.ce_count, ras_block_str(info->head.block)); | |
7c6e68c7 AG |
749 | } |
750 | if (err_data.ue_count) { | |
05a58345 TZ |
751 | dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n", |
752 | obj->err_data.ue_count, ras_block_str(info->head.block)); | |
7c6e68c7 | 753 | } |
05a58345 | 754 | |
c030f2e4 | 755 | return 0; |
756 | } | |
757 | ||
758 | /* wrapper of psp_ras_trigger_error */ | |
759 | int amdgpu_ras_error_inject(struct amdgpu_device *adev, | |
760 | struct ras_inject_if *info) | |
761 | { | |
762 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); | |
763 | struct ta_ras_trigger_error_input block_info = { | |
828cfa29 | 764 | .block_id = amdgpu_ras_block_to_ta(info->head.block), |
765 | .inject_error_type = amdgpu_ras_error_to_ta(info->head.type), | |
c030f2e4 | 766 | .sub_block_index = info->head.sub_block_index, |
767 | .address = info->address, | |
768 | .value = info->value, | |
769 | }; | |
770 | int ret = 0; | |
771 | ||
772 | if (!obj) | |
773 | return -EINVAL; | |
774 | ||
a6c44d25 JC |
775 | /* Calculate XGMI relative offset */ |
776 | if (adev->gmc.xgmi.num_physical_nodes > 1) { | |
19744f5f HZ |
777 | block_info.address = |
778 | amdgpu_xgmi_get_relative_phy_addr(adev, | |
779 | block_info.address); | |
a6c44d25 JC |
780 | } |
781 | ||
83b0582c DL |
782 | switch (info->head.block) { |
783 | case AMDGPU_RAS_BLOCK__GFX: | |
784 | if (adev->gfx.funcs->ras_error_inject) | |
785 | ret = adev->gfx.funcs->ras_error_inject(adev, info); | |
786 | else | |
787 | ret = -EINVAL; | |
788 | break; | |
789 | case AMDGPU_RAS_BLOCK__UMC: | |
9fb2d8de | 790 | case AMDGPU_RAS_BLOCK__MMHUB: |
f3170352 | 791 | case AMDGPU_RAS_BLOCK__XGMI_WAFL: |
d7bd680d | 792 | case AMDGPU_RAS_BLOCK__PCIE_BIF: |
83b0582c DL |
793 | ret = psp_ras_trigger_error(&adev->psp, &block_info); |
794 | break; | |
795 | default: | |
a5dd40ca HZ |
796 | DRM_INFO("%s error injection is not supported yet\n", |
797 | ras_block_str(info->head.block)); | |
83b0582c | 798 | ret = -EINVAL; |
a5dd40ca HZ |
799 | } |
800 | ||
c030f2e4 | 801 | if (ret) |
802 | DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n", | |
803 | ras_block_str(info->head.block), | |
804 | ret); | |
805 | ||
806 | return ret; | |
807 | } | |
808 | ||
809 | int amdgpu_ras_error_cure(struct amdgpu_device *adev, | |
810 | struct ras_cure_if *info) | |
811 | { | |
812 | /* psp fw has no cure interface for now. */ | |
813 | return 0; | |
814 | } | |
815 | ||
816 | /* get the total error counts on all IPs */ | |
64cc5414 | 817 | unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev, |
c030f2e4 | 818 | bool is_ce) |
819 | { | |
820 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
821 | struct ras_manager *obj; | |
822 | struct ras_err_data data = {0, 0}; | |
823 | ||
824 | if (!con) | |
64cc5414 | 825 | return 0; |
c030f2e4 | 826 | |
827 | list_for_each_entry(obj, &con->head, node) { | |
828 | struct ras_query_if info = { | |
829 | .head = obj->head, | |
830 | }; | |
831 | ||
832 | if (amdgpu_ras_error_query(adev, &info)) | |
64cc5414 | 833 | return 0; |
c030f2e4 | 834 | |
835 | data.ce_count += info.ce_count; | |
836 | data.ue_count += info.ue_count; | |
837 | } | |
838 | ||
839 | return is_ce ? data.ce_count : data.ue_count; | |
840 | } | |
841 | /* query/inject/cure end */ | |
842 | ||
843 | ||
844 | /* sysfs begin */ | |
845 | ||
466b1793 | 846 | static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, |
847 | struct ras_badpage **bps, unsigned int *count); | |
848 | ||
849 | static char *amdgpu_ras_badpage_flags_str(unsigned int flags) | |
850 | { | |
851 | switch (flags) { | |
52dd95f2 | 852 | case AMDGPU_RAS_RETIRE_PAGE_RESERVED: |
466b1793 | 853 | return "R"; |
52dd95f2 | 854 | case AMDGPU_RAS_RETIRE_PAGE_PENDING: |
466b1793 | 855 | return "P"; |
52dd95f2 | 856 | case AMDGPU_RAS_RETIRE_PAGE_FAULT: |
466b1793 | 857 | default: |
858 | return "F"; | |
859 | }; | |
860 | } | |
861 | ||
f77c7109 AD |
862 | /** |
863 | * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface | |
466b1793 | 864 | * |
865 | * It allows user to read the bad pages of vram on the gpu through | |
866 | * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages | |
867 | * | |
868 | * It outputs multiple lines, and each line stands for one gpu page. | |
869 | * | |
870 | * The format of one line is below, | |
871 | * gpu pfn : gpu page size : flags | |
872 | * | |
873 | * gpu pfn and gpu page size are printed in hex format. | |
874 | * flags can be one of below character, | |
f77c7109 | 875 | * |
466b1793 | 876 | * R: reserved, this gpu page is reserved and not able to use. |
f77c7109 | 877 | * |
466b1793 | 878 | * P: pending for reserve, this gpu page is marked as bad, will be reserved |
f77c7109 AD |
879 | * in next window of page_reserve. |
880 | * | |
466b1793 | 881 | * F: unable to reserve. this gpu page can't be reserved for some reason.
882 | * | |
f77c7109 AD |
883 | * Examples: |
884 | * | |
885 | * .. code-block:: bash | |
886 | * | |
887 | * 0x00000001 : 0x00001000 : R | |
888 | * 0x00000002 : 0x00001000 : P | |
889 | * | |
466b1793 | 890 | */ |
891 | ||
892 | static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f, | |
893 | struct kobject *kobj, struct bin_attribute *attr, | |
894 | char *buf, loff_t ppos, size_t count) | |
895 | { | |
896 | struct amdgpu_ras *con = | |
897 | container_of(attr, struct amdgpu_ras, badpages_attr); | |
898 | struct amdgpu_device *adev = con->adev; | |
899 | const unsigned int element_size = | |
900 | sizeof("0xabcdabcd : 0x12345678 : R\n") - 1; | |
d6ee400e SA |
901 | unsigned int start = div64_ul(ppos + element_size - 1, element_size); |
902 | unsigned int end = div64_ul(ppos + count - 1, element_size); | |
466b1793 | 903 | ssize_t s = 0; |
904 | struct ras_badpage *bps = NULL; | |
905 | unsigned int bps_count = 0; | |
906 | ||
907 | memset(buf, 0, count); | |
908 | ||
909 | if (amdgpu_ras_badpages_read(adev, &bps, &bps_count)) | |
910 | return 0; | |
911 | ||
912 | for (; start < end && start < bps_count; start++) | |
913 | s += scnprintf(&buf[s], element_size + 1, | |
914 | "0x%08x : 0x%08x : %1s\n", | |
915 | bps[start].bp, | |
916 | bps[start].size, | |
917 | amdgpu_ras_badpage_flags_str(bps[start].flags)); | |
918 | ||
919 | kfree(bps); | |
920 | ||
921 | return s; | |
922 | } | |
923 | ||
c030f2e4 | 924 | static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, |
925 | struct device_attribute *attr, char *buf) | |
926 | { | |
927 | struct amdgpu_ras *con = | |
928 | container_of(attr, struct amdgpu_ras, features_attr); | |
c030f2e4 | 929 | |
5212a3bd | 930 | return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features); |
c030f2e4 | 931 | } |
932 | ||
933 | static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev) | |
934 | { | |
935 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
936 | struct attribute *attrs[] = { | |
937 | &con->features_attr.attr, | |
938 | NULL | |
939 | }; | |
466b1793 | 940 | struct bin_attribute *bin_attrs[] = { |
941 | &con->badpages_attr, | |
942 | NULL | |
943 | }; | |
c030f2e4 | 944 | struct attribute_group group = { |
945 | .name = "ras", | |
946 | .attrs = attrs, | |
466b1793 | 947 | .bin_attrs = bin_attrs, |
c030f2e4 | 948 | }; |
949 | ||
950 | con->features_attr = (struct device_attribute) { | |
951 | .attr = { | |
952 | .name = "features", | |
953 | .mode = S_IRUGO, | |
954 | }, | |
955 | .show = amdgpu_ras_sysfs_features_read, | |
956 | }; | |
466b1793 | 957 | |
958 | con->badpages_attr = (struct bin_attribute) { | |
959 | .attr = { | |
960 | .name = "gpu_vram_bad_pages", | |
961 | .mode = S_IRUGO, | |
962 | }, | |
963 | .size = 0, | |
964 | .private = NULL, | |
965 | .read = amdgpu_ras_sysfs_badpages_read, | |
966 | }; | |
967 | ||
163def43 | 968 | sysfs_attr_init(attrs[0]); |
466b1793 | 969 | sysfs_bin_attr_init(bin_attrs[0]); |
c030f2e4 | 970 | |
971 | return sysfs_create_group(&adev->dev->kobj, &group); | |
972 | } | |
973 | ||
974 | static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev) | |
975 | { | |
976 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
977 | struct attribute *attrs[] = { | |
978 | &con->features_attr.attr, | |
979 | NULL | |
980 | }; | |
466b1793 | 981 | struct bin_attribute *bin_attrs[] = { |
982 | &con->badpages_attr, | |
983 | NULL | |
984 | }; | |
c030f2e4 | 985 | struct attribute_group group = { |
986 | .name = "ras", | |
987 | .attrs = attrs, | |
466b1793 | 988 | .bin_attrs = bin_attrs, |
c030f2e4 | 989 | }; |
990 | ||
991 | sysfs_remove_group(&adev->dev->kobj, &group); | |
992 | ||
993 | return 0; | |
994 | } | |
995 | ||
996 | int amdgpu_ras_sysfs_create(struct amdgpu_device *adev, | |
997 | struct ras_fs_if *head) | |
998 | { | |
999 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head); | |
1000 | ||
1001 | if (!obj || obj->attr_inuse) | |
1002 | return -EINVAL; | |
1003 | ||
1004 | get_obj(obj); | |
1005 | ||
1006 | memcpy(obj->fs_data.sysfs_name, | |
1007 | head->sysfs_name, | |
1008 | sizeof(obj->fs_data.sysfs_name)); | |
1009 | ||
1010 | obj->sysfs_attr = (struct device_attribute){ | |
1011 | .attr = { | |
1012 | .name = obj->fs_data.sysfs_name, | |
1013 | .mode = S_IRUGO, | |
1014 | }, | |
1015 | .show = amdgpu_ras_sysfs_read, | |
1016 | }; | |
163def43 | 1017 | sysfs_attr_init(&obj->sysfs_attr.attr); |
c030f2e4 | 1018 | |
1019 | if (sysfs_add_file_to_group(&adev->dev->kobj, | |
1020 | &obj->sysfs_attr.attr, | |
1021 | "ras")) { | |
1022 | put_obj(obj); | |
1023 | return -EINVAL; | |
1024 | } | |
1025 | ||
1026 | obj->attr_inuse = 1; | |
1027 | ||
1028 | return 0; | |
1029 | } | |
1030 | ||
1031 | int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, | |
1032 | struct ras_common_if *head) | |
1033 | { | |
1034 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); | |
1035 | ||
1036 | if (!obj || !obj->attr_inuse) | |
1037 | return -EINVAL; | |
1038 | ||
1039 | sysfs_remove_file_from_group(&adev->dev->kobj, | |
1040 | &obj->sysfs_attr.attr, | |
1041 | "ras"); | |
1042 | obj->attr_inuse = 0; | |
1043 | put_obj(obj); | |
1044 | ||
1045 | return 0; | |
1046 | } | |
1047 | ||
1048 | static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) | |
1049 | { | |
1050 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1051 | struct ras_manager *obj, *tmp; | |
1052 | ||
1053 | list_for_each_entry_safe(obj, tmp, &con->head, node) { | |
1054 | amdgpu_ras_sysfs_remove(adev, &obj->head); | |
1055 | } | |
1056 | ||
1057 | amdgpu_ras_sysfs_remove_feature_node(adev); | |
1058 | ||
1059 | return 0; | |
1060 | } | |
1061 | /* sysfs end */ | |
1062 | ||
ef177d11 AD |
1063 | /** |
1064 | * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors | |
1065 | * | |
1066 | * Normally when there is an uncorrectable error, the driver will reset | |
1067 | * the GPU to recover. However, in the event of an unrecoverable error, | |
1068 | * the driver provides an interface to reboot the system automatically | |
1069 | * in that event. | |
1070 | * | |
1071 | * The following file in debugfs provides that interface: | |
1072 | * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot | |
1073 | * | |
1074 | * Usage: | |
1075 | * | |
1076 | * .. code-block:: bash | |
1077 | * | |
1078 | * echo true > .../ras/auto_reboot | |
1079 | * | |
1080 | */ | |
c030f2e4 | 1081 | /* debugfs begin */ |
450f30ea | 1082 | static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) |
36ea1bd2 | 1083 | { |
1084 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1085 | struct drm_minor *minor = adev->ddev->primary; | |
36ea1bd2 | 1086 | |
450f30ea | 1087 | con->dir = debugfs_create_dir("ras", minor->debugfs_root); |
012dd14d GC |
1088 | debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir, |
1089 | adev, &amdgpu_ras_debugfs_ctrl_ops); | |
1090 | debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir, | |
1091 | adev, &amdgpu_ras_debugfs_eeprom_ops); | |
c688a06b GC |
1092 | |
1093 | /* | |
1094 | * After one uncorrectable error happens, usually GPU recovery will | |
1095 | * be scheduled. But due to the known problem in GPU recovery failing | |
1096 | * to bring the GPU back, the interface below provides a direct way | |
1097 | * for the user to reboot the system automatically in such a case when | |
1098 | * an ERREVENT_ATHUB_INTERRUPT is generated. The normal GPU recovery routine | |
1099 | * will never be called. | |
1100 | */ | |
1101 | debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir, | |
1102 | &con->reboot); | |
36ea1bd2 | 1103 | } |
1104 | ||
450f30ea | 1105 | void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, |
c030f2e4 | 1106 | struct ras_fs_if *head) |
1107 | { | |
1108 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1109 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head); | |
c030f2e4 | 1110 | |
1111 | if (!obj || obj->ent) | |
450f30ea | 1112 | return; |
c030f2e4 | 1113 | |
1114 | get_obj(obj); | |
1115 | ||
1116 | memcpy(obj->fs_data.debugfs_name, | |
1117 | head->debugfs_name, | |
1118 | sizeof(obj->fs_data.debugfs_name)); | |
1119 | ||
450f30ea GKH |
1120 | obj->ent = debugfs_create_file(obj->fs_data.debugfs_name, |
1121 | S_IWUGO | S_IRUGO, con->dir, obj, | |
1122 | &amdgpu_ras_debugfs_ops); | |
c030f2e4 | 1123 | } |
1124 | ||
f9317014 TZ |
1125 | void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) |
1126 | { | |
1127 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
c1509f3f | 1128 | struct ras_manager *obj; |
f9317014 TZ |
1129 | struct ras_fs_if fs_info; |
1130 | ||
1131 | /* | |
1132 | * it won't be called in resume path, no need to check | |
1133 | * suspend and gpu reset status | |
1134 | */ | |
1135 | if (!con) | |
1136 | return; | |
1137 | ||
1138 | amdgpu_ras_debugfs_create_ctrl_node(adev); | |
1139 | ||
c1509f3f | 1140 | list_for_each_entry(obj, &con->head, node) { |
f9317014 TZ |
1141 | if (amdgpu_ras_is_supported(adev, obj->head.block) && |
1142 | (obj->attr_inuse == 1)) { | |
1143 | sprintf(fs_info.debugfs_name, "%s_err_inject", | |
1144 | ras_block_str(obj->head.block)); | |
1145 | fs_info.head = obj->head; | |
1146 | amdgpu_ras_debugfs_create(adev, &fs_info); | |
1147 | } | |
1148 | } | |
1149 | } | |
1150 | ||
450f30ea | 1151 | void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, |
c030f2e4 | 1152 | struct ras_common_if *head) |
1153 | { | |
1154 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); | |
1155 | ||
1156 | if (!obj || !obj->ent) | |
450f30ea | 1157 | return; |
c030f2e4 | 1158 | |
1159 | debugfs_remove(obj->ent); | |
1160 | obj->ent = NULL; | |
1161 | put_obj(obj); | |
c030f2e4 | 1162 | } |
1163 | ||
450f30ea | 1164 | static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) |
c030f2e4 | 1165 | { |
1166 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1167 | struct ras_manager *obj, *tmp; | |
1168 | ||
1169 | list_for_each_entry_safe(obj, tmp, &con->head, node) { | |
1170 | amdgpu_ras_debugfs_remove(adev, &obj->head); | |
1171 | } | |
1172 | ||
012dd14d | 1173 | debugfs_remove_recursive(con->dir); |
c030f2e4 | 1174 | con->dir = NULL; |
c030f2e4 | 1175 | } |
1176 | /* debugfs end */ | |
1177 | ||
1178 | /* ras fs */ | |
1179 | ||
1180 | static int amdgpu_ras_fs_init(struct amdgpu_device *adev) | |
1181 | { | |
c030f2e4 | 1182 | amdgpu_ras_sysfs_create_feature_node(adev); |
1183 | ||
1184 | return 0; | |
1185 | } | |
1186 | ||
1187 | static int amdgpu_ras_fs_fini(struct amdgpu_device *adev) | |
1188 | { | |
1189 | amdgpu_ras_debugfs_remove_all(adev); | |
1190 | amdgpu_ras_sysfs_remove_all(adev); | |
1191 | return 0; | |
1192 | } | |
1193 | /* ras fs end */ | |
1194 | ||
1195 | /* ih begin */ | |
1196 | static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) | |
1197 | { | |
1198 | struct ras_ih_data *data = &obj->ih_data; | |
1199 | struct amdgpu_iv_entry entry; | |
1200 | int ret; | |
cf04dfd0 | 1201 | struct ras_err_data err_data = {0, 0, 0, NULL}; |
c030f2e4 | 1202 | |
1203 | while (data->rptr != data->wptr) { | |
1204 | rmb(); | |
1205 | memcpy(&entry, &data->ring[data->rptr], | |
1206 | data->element_size); | |
1207 | ||
1208 | wmb(); | |
1209 | data->rptr = (data->aligned_element_size + | |
1210 | data->rptr) % data->ring_size; | |
1211 | ||
1212 | /* Let the IP handle its data; maybe we need to get the output | |
1213 | * from the callback to update the error type/count, etc. | |
1214 | */ | |
1215 | if (data->cb) { | |
cf04dfd0 | 1216 | ret = data->cb(obj->adev, &err_data, &entry); |
c030f2e4 | 1217 | /* ue will trigger an interrupt, and in that case |
1218 | * we need to do a reset to recover the whole system. | |
1219 | * But leave that recovery to the IP; here we just dispatch | |
1220 | * the error. | |
1221 | */ | |
bd2280da | 1222 | if (ret == AMDGPU_RAS_SUCCESS) { |
51437623 TZ |
1223 | /* these counts could be left as 0 if |
1224 | * some blocks do not count error number | |
1225 | */ | |
cf04dfd0 | 1226 | obj->err_data.ue_count += err_data.ue_count; |
51437623 | 1227 | obj->err_data.ce_count += err_data.ce_count; |
c030f2e4 | 1228 | } |
c030f2e4 | 1229 | } |
1230 | } | |
1231 | } | |
1232 | ||
1233 | static void amdgpu_ras_interrupt_process_handler(struct work_struct *work) | |
1234 | { | |
1235 | struct ras_ih_data *data = | |
1236 | container_of(work, struct ras_ih_data, ih_work); | |
1237 | struct ras_manager *obj = | |
1238 | container_of(data, struct ras_manager, ih_data); | |
1239 | ||
1240 | amdgpu_ras_interrupt_handler(obj); | |
1241 | } | |
1242 | ||
1243 | int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, | |
1244 | struct ras_dispatch_if *info) | |
1245 | { | |
1246 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); | |
1247 | struct ras_ih_data *data = &obj->ih_data; | |
1248 | ||
1249 | if (!obj) | |
1250 | return -EINVAL; | |
1251 | ||
1252 | if (data->inuse == 0) | |
1253 | return 0; | |
1254 | ||
1255 | /* Might be overflow... */ | |
1256 | memcpy(&data->ring[data->wptr], info->entry, | |
1257 | data->element_size); | |
1258 | ||
1259 | wmb(); | |
1260 | data->wptr = (data->aligned_element_size + | |
1261 | data->wptr) % data->ring_size; | |
1262 | ||
1263 | schedule_work(&data->ih_work); | |
1264 | ||
1265 | return 0; | |
1266 | } | |
1267 | ||
1268 | int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev, | |
1269 | struct ras_ih_if *info) | |
1270 | { | |
1271 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); | |
1272 | struct ras_ih_data *data; | |
1273 | ||
1274 | if (!obj) | |
1275 | return -EINVAL; | |
1276 | ||
1277 | data = &obj->ih_data; | |
1278 | if (data->inuse == 0) | |
1279 | return 0; | |
1280 | ||
1281 | cancel_work_sync(&data->ih_work); | |
1282 | ||
1283 | kfree(data->ring); | |
1284 | memset(data, 0, sizeof(*data)); | |
1285 | put_obj(obj); | |
1286 | ||
1287 | return 0; | |
1288 | } | |
1289 | ||
1290 | int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev, | |
1291 | struct ras_ih_if *info) | |
1292 | { | |
1293 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); | |
1294 | struct ras_ih_data *data; | |
1295 | ||
1296 | if (!obj) { | |
1297 | /* in case we register the IH before enabling the ras feature */ | |
1298 | obj = amdgpu_ras_create_obj(adev, &info->head); | |
1299 | if (!obj) | |
1300 | return -EINVAL; | |
1301 | } else | |
1302 | get_obj(obj); | |
1303 | ||
1304 | data = &obj->ih_data; | |
1305 | /* add the callback, etc. */ | |
1306 | *data = (struct ras_ih_data) { | |
1307 | .inuse = 0, | |
1308 | .cb = info->cb, | |
1309 | .element_size = sizeof(struct amdgpu_iv_entry), | |
1310 | .rptr = 0, | |
1311 | .wptr = 0, | |
1312 | }; | |
1313 | ||
1314 | INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler); | |
1315 | ||
1316 | data->aligned_element_size = ALIGN(data->element_size, 8); | |
1317 | /* the ring can store 64 iv entries. */ | |
1318 | data->ring_size = 64 * data->aligned_element_size; | |
1319 | data->ring = kmalloc(data->ring_size, GFP_KERNEL); | |
1320 | if (!data->ring) { | |
1321 | put_obj(obj); | |
1322 | return -ENOMEM; | |
1323 | } | |
1324 | ||
1325 | /* IH is ready */ | |
1326 | data->inuse = 1; | |
1327 | ||
1328 | return 0; | |
1329 | } | |
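amdgpu_ras_interrupt_add_handler() above takes a struct ras_ih_if carrying the block head and the per-IP callback that the drain loop in amdgpu_ras_interrupt_handler() invokes. Below is a hedged in-kernel sketch of how an IP block might register such a handler; the callback signature is inferred from the data->cb(obj->adev, &err_data, &entry) call above, and the example_* names are hypothetical.

```c
/* Hedged sketch, assuming only the ras_ih_if/ras_common_if fields and the
 * callback invocation already visible in this file. example_* names are
 * hypothetical.
 */
#include "amdgpu.h"
#include "amdgpu_ras.h"

static int example_umc_ras_cb(struct amdgpu_device *adev,
			      struct ras_err_data *err_data,
			      struct amdgpu_iv_entry *entry)
{
	/* decode the IV entry and bump err_data->ue_count / ce_count here */
	return AMDGPU_RAS_SUCCESS;
}

static int example_register_umc_ih(struct amdgpu_device *adev)
{
	struct ras_ih_if ih_info = {
		.head = {
			.block = AMDGPU_RAS_BLOCK__UMC,
			.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
		},
		.cb = example_umc_ras_cb,
	};

	/* creates the obj if needed, allocates the 64-entry IV ring and
	 * marks the handler ready (data->inuse = 1), as implemented above
	 */
	return amdgpu_ras_interrupt_add_handler(adev, &ih_info);
}
```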
1330 | ||
1331 | static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev) | |
1332 | { | |
1333 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1334 | struct ras_manager *obj, *tmp; | |
1335 | ||
1336 | list_for_each_entry_safe(obj, tmp, &con->head, node) { | |
1337 | struct ras_ih_if info = { | |
1338 | .head = obj->head, | |
1339 | }; | |
1340 | amdgpu_ras_interrupt_remove_handler(adev, &info); | |
1341 | } | |
1342 | ||
1343 | return 0; | |
1344 | } | |
1345 | /* ih end */ | |
1346 | ||
313c8fd3 GC |
1347 | /* traverse all IPs except NBIO to query error counters */ | |
1348 | static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) | |
1349 | { | |
1350 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1351 | struct ras_manager *obj; | |
1352 | ||
1353 | if (!con) | |
1354 | return; | |
1355 | ||
1356 | list_for_each_entry(obj, &con->head, node) { | |
1357 | struct ras_query_if info = { | |
1358 | .head = obj->head, | |
1359 | }; | |
1360 | ||
1361 | /* | |
1362 | * The PCIE_BIF IP has a separate isr for the ras controller | |
1363 | * interrupt; its specific ras counter query will be | |
1364 | * done in that isr. So skip such blocks from the common | |
1365 | * sync flood interrupt isr. | |
1366 | */ | |
1367 | if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF) | |
1368 | continue; | |
1369 | ||
1370 | amdgpu_ras_error_query(adev, &info); | |
1371 | } | |
1372 | } | |
1373 | ||
c030f2e4 | 1374 | /* recovery begin */ |
466b1793 | 1375 | |
1376 | /* return 0 on success. | |
1377 | * caller needs to free bps. | |
1378 | */ | |
1379 | static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, | |
1380 | struct ras_badpage **bps, unsigned int *count) | |
1381 | { | |
1382 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1383 | struct ras_err_handler_data *data; | |
1384 | int i = 0; | |
1385 | int ret = 0; | |
1386 | ||
1387 | if (!con || !con->eh_data || !bps || !count) | |
1388 | return -EINVAL; | |
1389 | ||
1390 | mutex_lock(&con->recovery_lock); | |
1391 | data = con->eh_data; | |
1392 | if (!data || data->count == 0) { | |
1393 | *bps = NULL; | |
46cf2fec | 1394 | ret = -EINVAL; |
466b1793 | 1395 | goto out; |
1396 | } | |
1397 | ||
1398 | *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL); | |
1399 | if (!*bps) { | |
1400 | ret = -ENOMEM; | |
1401 | goto out; | |
1402 | } | |
1403 | ||
1404 | for (; i < data->count; i++) { | |
1405 | (*bps)[i] = (struct ras_badpage){ | |
9dc23a63 | 1406 | .bp = data->bps[i].retired_page, |
466b1793 | 1407 | .size = AMDGPU_GPU_PAGE_SIZE, |
52dd95f2 | 1408 | .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, |
466b1793 | 1409 | }; |
1410 | ||
1411 | if (data->last_reserved <= i) | |
52dd95f2 | 1412 | (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; |
9dc23a63 | 1413 | else if (data->bps_bo[i] == NULL) |
52dd95f2 | 1414 | (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; |
466b1793 | 1415 | } |
1416 | ||
1417 | *count = data->count; | |
1418 | out: | |
1419 | mutex_unlock(&con->recovery_lock); | |
1420 | return ret; | |
1421 | } | |
1422 | ||
c030f2e4 | 1423 | static void amdgpu_ras_do_recovery(struct work_struct *work) |
1424 | { | |
1425 | struct amdgpu_ras *ras = | |
1426 | container_of(work, struct amdgpu_ras, recovery_work); | |
1427 | ||
313c8fd3 GC |
1428 | /* |
1429 | * Query and print non-zero error counters per IP block for | |
1430 | * awareness before recovering GPU. | |
1431 | */ | |
1432 | amdgpu_ras_log_on_err_counter(ras->adev); | |
1433 | ||
93af20f7 HZ |
1434 | if (amdgpu_device_should_recover_gpu(ras->adev)) |
1435 | amdgpu_device_gpu_recover(ras->adev, 0); | |
c030f2e4 | 1436 | atomic_set(&ras->in_recovery, 0); |
1437 | } | |
1438 | ||
c030f2e4 | 1439 | /* alloc/realloc bps array */ |
1440 | static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, | |
1441 | struct ras_err_handler_data *data, int pages) | |
1442 | { | |
1443 | unsigned int old_space = data->count + data->space_left; | |
1444 | unsigned int new_space = old_space + pages; | |
9dc23a63 TZ |
1445 | unsigned int align_space = ALIGN(new_space, 512); |
1446 | void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL); | |
1447 | struct amdgpu_bo **bps_bo = | |
1448 | kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL); | |
1449 | ||
1450 | if (!bps || !bps_bo) { | |
1451 | kfree(bps); | |
1452 | kfree(bps_bo); | |
c030f2e4 | 1453 | return -ENOMEM; |
9dc23a63 | 1454 | } |
c030f2e4 | 1455 | |
1456 | if (data->bps) { | |
9dc23a63 | 1457 | memcpy(bps, data->bps, |
c030f2e4 | 1458 | data->count * sizeof(*data->bps)); |
1459 | kfree(data->bps); | |
1460 | } | |
9dc23a63 TZ |
1461 | if (data->bps_bo) { |
1462 | memcpy(bps_bo, data->bps_bo, | |
1463 | data->count * sizeof(*data->bps_bo)); | |
1464 | kfree(data->bps_bo); | |
1465 | } | |
c030f2e4 | 1466 | |
9dc23a63 TZ |
1467 | data->bps = bps; |
1468 | data->bps_bo = bps_bo; | |
c030f2e4 | 1469 | data->space_left += align_space - old_space; |
1470 | return 0; | |
1471 | } | |
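The growth policy above always rounds the new capacity up to a multiple of 512 records. A small worked example of that arithmetic, assuming the usual kernel definition of ALIGN() for power-of-two alignments; the starting counts are made up.

```c
/* Worked example of the realloc arithmetic above, assuming
 * ALIGN(x, a) == ((x) + (a) - 1) & ~((a) - 1) for power-of-two a.
 */
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int count = 100, space_left = 28, pages = 5; /* hypothetical state */
	unsigned int old_space = count + space_left;           /* 128 */
	unsigned int new_space = old_space + pages;            /* 133 */
	unsigned int align_space = ALIGN(new_space, 512u);     /* rounds up to 512 */

	space_left += align_space - old_space;                 /* 28 + 384 = 412 */
	printf("capacity %u records, %u slots free\n", align_space, space_left);
	return 0;
}
```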
1472 | ||
1473 | /* it deals with vram only. */ | |
1474 | int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, | |
9dc23a63 | 1475 | struct eeprom_table_record *bps, int pages) |
c030f2e4 | 1476 | { |
1477 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
73aa8e1a | 1478 | struct ras_err_handler_data *data; |
c030f2e4 | 1479 | int ret = 0; |
1480 | ||
73aa8e1a | 1481 | if (!con || !con->eh_data || !bps || pages <= 0) |
c030f2e4 | 1482 | return 0; |
1483 | ||
1484 | mutex_lock(&con->recovery_lock); | |
73aa8e1a | 1485 | data = con->eh_data; |
c030f2e4 | 1486 | if (!data) |
1487 | goto out; | |
1488 | ||
1489 | if (data->space_left <= pages) | |
1490 | if (amdgpu_ras_realloc_eh_data_space(adev, data, pages)) { | |
1491 | ret = -ENOMEM; | |
1492 | goto out; | |
1493 | } | |
1494 | ||
9dc23a63 TZ |
1495 | memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps)); |
1496 | data->count += pages; | |
c030f2e4 | 1497 | data->space_left -= pages; |
9dc23a63 | 1498 | |
c030f2e4 | 1499 | out: |
1500 | mutex_unlock(&con->recovery_lock); | |
1501 | ||
1502 | return ret; | |
1503 | } | |
1504 | ||
78ad00c9 TZ |
1505 | /* |
1506 | * write error record array to eeprom, the function should be | |
1507 | * protected by recovery_lock | |
1508 | */ | |
1509 | static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) | |
1510 | { | |
1511 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1512 | struct ras_err_handler_data *data; | |
8a3e801f | 1513 | struct amdgpu_ras_eeprom_control *control; |
78ad00c9 TZ |
1514 | int save_count; |
1515 | ||
1516 | if (!con || !con->eh_data) | |
1517 | return 0; | |
1518 | ||
8a3e801f | 1519 | control = &con->eeprom_control; |
78ad00c9 TZ |
1520 | data = con->eh_data; |
1521 | save_count = data->count - control->num_recs; | |
1522 | /* only new entries are saved */ | |
1523 | if (save_count > 0) | |
0771b0bf | 1524 | if (amdgpu_ras_eeprom_process_recods(control, |
78ad00c9 TZ |
1525 | &data->bps[control->num_recs], |
1526 | true, | |
1527 | save_count)) { | |
1528 | DRM_ERROR("Failed to save EEPROM table data!"); | |
1529 | return -EIO; | |
1530 | } | |
1531 | ||
1532 | return 0; | |
1533 | } | |
1534 | ||
1535 | /* | |
1536 | * read error record array in eeprom and reserve enough space for | |
1537 | * storing new bad pages | |
1538 | */ | |
1539 | static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) | |
1540 | { | |
1541 | struct amdgpu_ras_eeprom_control *control = | |
1542 | &adev->psp.ras.ras->eeprom_control; | |
1543 | struct eeprom_table_record *bps = NULL; | |
1544 | int ret = 0; | |
1545 | ||
1546 | /* no bad page record, skip eeprom access */ | |
1547 | if (!control->num_recs) | |
1548 | return ret; | |
1549 | ||
1550 | bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL); | |
1551 | if (!bps) | |
1552 | return -ENOMEM; | |
1553 | ||
1554 | if (amdgpu_ras_eeprom_process_recods(control, bps, false, | |
1555 | control->num_recs)) { | |
1556 | DRM_ERROR("Failed to load EEPROM table records!"); | |
1557 | ret = -EIO; | |
1558 | goto out; | |
1559 | } | |
1560 | ||
1561 | ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs); | |
1562 | ||
1563 | out: | |
1564 | kfree(bps); | |
1565 | return ret; | |
1566 | } | |
1567 | ||
6e4be987 TZ |
1568 | /* |
1569 | * check if an address belongs to a bad page | |
1570 | * | |
1571 | * Note: this check is only for umc block | |
1572 | */ | |
1573 | static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, | |
1574 | uint64_t addr) | |
1575 | { | |
1576 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1577 | struct ras_err_handler_data *data; | |
1578 | int i; | |
1579 | bool ret = false; | |
1580 | ||
1581 | if (!con || !con->eh_data) | |
1582 | return ret; | |
1583 | ||
1584 | mutex_lock(&con->recovery_lock); | |
1585 | data = con->eh_data; | |
1586 | if (!data) | |
1587 | goto out; | |
1588 | ||
1589 | addr >>= AMDGPU_GPU_PAGE_SHIFT; | |
1590 | for (i = 0; i < data->count; i++) | |
1591 | if (addr == data->bps[i].retired_page) { | |
1592 | ret = true; | |
1593 | goto out; | |
1594 | } | |
1595 | ||
1596 | out: | |
1597 | mutex_unlock(&con->recovery_lock); | |
1598 | return ret; | |
1599 | } | |
1600 | ||
c030f2e4 | 1601 | /* called in gpu recovery/init */ |
1602 | int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev) | |
1603 | { | |
1604 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
73aa8e1a | 1605 | struct ras_err_handler_data *data; |
c030f2e4 | 1606 | uint64_t bp; |
de7b45ba | 1607 | struct amdgpu_bo *bo = NULL; |
78ad00c9 | 1608 | int i, ret = 0; |
c030f2e4 | 1609 | |
73aa8e1a | 1610 | if (!con || !con->eh_data) |
c030f2e4 | 1611 | return 0; |
1612 | ||
1613 | mutex_lock(&con->recovery_lock); | |
73aa8e1a | 1614 | data = con->eh_data; |
1615 | if (!data) | |
1616 | goto out; | |
c030f2e4 | 1617 | /* reserve vram at driver post stage. */ |
1618 | for (i = data->last_reserved; i < data->count; i++) { | |
9dc23a63 | 1619 | bp = data->bps[i].retired_page; |
c030f2e4 | 1620 | |
ae115c81 TZ |
1621 | /* There are two cases of reserve error that should be ignored: | |
1622 | * 1) a ras bad page has been allocated (used by someone); | |
1623 | * 2) a ras bad page has been reserved (duplicate error injection | |
1624 | * for one page); | |
1625 | */ | |
a142ba88 AD |
1626 | if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT, |
1627 | AMDGPU_GPU_PAGE_SIZE, | |
de7b45ba CK |
1628 | AMDGPU_GEM_DOMAIN_VRAM, |
1629 | &bo, NULL)) | |
ae115c81 | 1630 | DRM_WARN("RAS WARN: reserve vram for retired page %llx failed\n", bp); |
c030f2e4 | 1631 | |
9dc23a63 | 1632 | data->bps_bo[i] = bo; |
c030f2e4 | 1633 | data->last_reserved = i + 1; |
de7b45ba | 1634 | bo = NULL; |
c030f2e4 | 1635 | } |
78ad00c9 TZ |
1636 | |
1637 | /* continue to save bad pages to eeprom even if reserve_vram fails */ | |
1638 | ret = amdgpu_ras_save_bad_pages(adev); | |
73aa8e1a | 1639 | out: |
c030f2e4 | 1640 | mutex_unlock(&con->recovery_lock); |
78ad00c9 | 1641 | return ret; |
c030f2e4 | 1642 | } |
1643 | ||
1644 | /* called on driver unload */ | |
1645 | static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev) | |
1646 | { | |
1647 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
73aa8e1a | 1648 | struct ras_err_handler_data *data; |
c030f2e4 | 1649 | struct amdgpu_bo *bo; |
1650 | int i; | |
1651 | ||
73aa8e1a | 1652 | if (!con || !con->eh_data) |
c030f2e4 | 1653 | return 0; |
1654 | ||
1655 | mutex_lock(&con->recovery_lock); | |
73aa8e1a | 1656 | data = con->eh_data; |
1657 | if (!data) | |
1658 | goto out; | |
1659 | ||
c030f2e4 | 1660 | for (i = data->last_reserved - 1; i >= 0; i--) { |
9dc23a63 | 1661 | bo = data->bps_bo[i]; |
c030f2e4 | 1662 | |
de7b45ba | 1663 | amdgpu_bo_free_kernel(&bo, NULL, NULL); |
c030f2e4 | 1664 | |
9dc23a63 | 1665 | data->bps_bo[i] = bo; |
c030f2e4 | 1666 | data->last_reserved = i; |
1667 | } | |
73aa8e1a | 1668 | out: |
c030f2e4 | 1669 | mutex_unlock(&con->recovery_lock); |
1670 | return 0; | |
1671 | } | |
1672 | ||
1a6fc071 | 1673 | int amdgpu_ras_recovery_init(struct amdgpu_device *adev) |
c030f2e4 | 1674 | { |
1675 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
4d1337d2 | 1676 | struct ras_err_handler_data **data; |
78ad00c9 | 1677 | int ret; |
c030f2e4 | 1678 | |
4d1337d2 AG |
1679 | if (con) |
1680 | data = &con->eh_data; | |
1681 | else | |
1682 | return 0; | |
1683 | ||
1a6fc071 TZ |
1684 | *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO); |
1685 | if (!*data) { | |
1686 | ret = -ENOMEM; | |
1687 | goto out; | |
1688 | } | |
c030f2e4 | 1689 | |
1690 | mutex_init(&con->recovery_lock); | |
1691 | INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); | |
1692 | atomic_set(&con->in_recovery, 0); | |
1693 | con->adev = adev; | |
1694 | ||
0771b0bf | 1695 | ret = amdgpu_ras_eeprom_init(&con->eeprom_control); |
78ad00c9 | 1696 | if (ret) |
1a6fc071 | 1697 | goto free; |
78ad00c9 | 1698 | |
0771b0bf | 1699 | if (con->eeprom_control.num_recs) { |
78ad00c9 TZ |
1700 | ret = amdgpu_ras_load_bad_pages(adev); |
1701 | if (ret) | |
1a6fc071 | 1702 | goto free; |
78ad00c9 TZ |
1703 | ret = amdgpu_ras_reserve_bad_pages(adev); |
1704 | if (ret) | |
1a6fc071 | 1705 | goto release; |
78ad00c9 | 1706 | } |
c030f2e4 | 1707 | |
1708 | return 0; | |
1a6fc071 TZ |
1709 | |
1710 | release: | |
1711 | amdgpu_ras_release_bad_pages(adev); | |
1712 | free: | |
1a6fc071 TZ |
1713 | kfree((*data)->bps); |
1714 | kfree((*data)->bps_bo); | |
1715 | kfree(*data); | |
1995b3a3 | 1716 | con->eh_data = NULL; |
1a6fc071 TZ |
1717 | out: |
1718 | DRM_WARN("Failed to initialize ras recovery!\n"); | |
1719 | ||
1720 | return ret; | |
c030f2e4 | 1721 | } |
1722 | ||
1723 | static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) | |
1724 | { | |
1725 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1726 | struct ras_err_handler_data *data = con->eh_data; | |
1727 | ||
1a6fc071 TZ |
1728 | /* recovery_init failed to initialize it, so fini has nothing to do */ | |
1729 | if (!data) | |
1730 | return 0; | |
1731 | ||
c030f2e4 | 1732 | cancel_work_sync(&con->recovery_work); |
c030f2e4 | 1733 | amdgpu_ras_release_bad_pages(adev); |
1734 | ||
1735 | mutex_lock(&con->recovery_lock); | |
1736 | con->eh_data = NULL; | |
1737 | kfree(data->bps); | |
1a6fc071 | 1738 | kfree(data->bps_bo); |
c030f2e4 | 1739 | kfree(data); |
1740 | mutex_unlock(&con->recovery_lock); | |
1741 | ||
1742 | return 0; | |
1743 | } | |
1744 | /* recovery end */ | |
1745 | ||
a564808e | 1746 | /* return 0 if ras will reset the gpu and repost. */ |
1747 | int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, | |
1748 | unsigned int block) | |
1749 | { | |
1750 | struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); | |
1751 | ||
1752 | if (!ras) | |
1753 | return -EINVAL; | |
1754 | ||
1755 | ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET; | |
1756 | return 0; | |
1757 | } | |
1758 | ||
5caf466a | 1759 | /* |
1760 | * check hardware's ras ability, which will be saved in hw_supported. | |
1761 | * if the hardware does not support ras, we can skip some ras initialization | |
1762 | * and forbid some ras operations from IP blocks. | |
1763 | * if software itself, say a boot parameter, limits the ras ability, we still | |
1764 | * need to allow IP blocks to do some limited operations, like disable. In such | |
1765 | * a case we have to initialize ras as normal, but need to check in each | |
1766 | * function whether the operation is allowed or not. | |
1767 | */ | |
1768 | static void amdgpu_ras_check_supported(struct amdgpu_device *adev, | |
1769 | uint32_t *hw_supported, uint32_t *supported) | |
c030f2e4 | 1770 | { |
5caf466a | 1771 | *hw_supported = 0; |
1772 | *supported = 0; | |
c030f2e4 | 1773 | |
88474cca | 1774 | if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw || |
baaeb610 HZ |
1775 | (adev->asic_type != CHIP_VEGA20 && |
1776 | adev->asic_type != CHIP_ARCTURUS)) | |
5caf466a | 1777 | return; |
b404ae82 | 1778 | |
88474cca GC |
1779 | if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { |
1780 | DRM_INFO("HBM ECC is active.\n"); | |
1781 | *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC | | |
1782 | 1 << AMDGPU_RAS_BLOCK__DF); | |
1783 | } else | |
1784 | DRM_INFO("HBM ECC is not present.\n"); | |
1785 | ||
1786 | if (amdgpu_atomfirmware_sram_ecc_supported(adev)) { | |
1787 | DRM_INFO("SRAM ECC is active.\n"); | |
1788 | *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC | | |
1789 | 1 << AMDGPU_RAS_BLOCK__DF); | |
1790 | } else | |
1791 | DRM_INFO("SRAM ECC is not present.\n"); | |
1792 | ||
1793 | /* hw_supported needs to be aligned with RAS block mask. */ | |
1794 | *hw_supported &= AMDGPU_RAS_BLOCK_MASK; | |
b404ae82 | 1795 | |
5caf466a | 1796 | *supported = amdgpu_ras_enable == 0 ? |
88474cca | 1797 | 0 : *hw_supported & amdgpu_ras_mask; |
c030f2e4 | 1798 | } |
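The two outputs are plain per-block bitmasks, following the same block ordering as ras_block_string[] above: HBM ECC contributes only the UMC and DF bits, SRAM ECC contributes every bit except UMC and DF, the result is clamped to the known blocks, and supported is hw_supported filtered by the module parameters. Below is a standalone sketch of that mask arithmetic; the index and mask values are illustrative stand-ins for the enum amdgpu_ras_block constants.

	#include <stdio.h>
	#include <stdint.h>

	/* illustrative indices, following the ras_block_string[] order (umc=0, ..., df=8) */
	#define BLK_UMC  0
	#define BLK_GFX  2
	#define BLK_DF   8
	#define BLOCK_MASK_ALL ((1u << 14) - 1)   /* stand-in for AMDGPU_RAS_BLOCK_MASK */

	int main(void)
	{
		uint32_t hw_supported = 0;

		hw_supported |= (1u << BLK_UMC) | (1u << BLK_DF);    /* HBM ECC active  */
		hw_supported |= ~((1u << BLK_UMC) | (1u << BLK_DF)); /* SRAM ECC active */
		hw_supported &= BLOCK_MASK_ALL;                      /* clamp to known blocks */

		/* supported = hw_supported filtered by the user-controlled ras mask */
		uint32_t ras_mask  = 1u << BLK_GFX;                  /* e.g. only gfx requested */
		uint32_t supported = hw_supported & ras_mask;

		printf("hw_supported=0x%x supported=0x%x\n", hw_supported, supported);
		return 0;
	}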
1799 | ||
1800 | int amdgpu_ras_init(struct amdgpu_device *adev) | |
1801 | { | |
1802 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
4e644fff | 1803 | int r; |
c030f2e4 | 1804 | |
b404ae82 | 1805 | if (con) |
c030f2e4 | 1806 | return 0; |
1807 | ||
1808 | con = kmalloc(sizeof(struct amdgpu_ras) + | |
1809 | sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT, | |
1810 | GFP_KERNEL|__GFP_ZERO); | |
1811 | if (!con) | |
1812 | return -ENOMEM; | |
1813 | ||
1814 | con->objs = (struct ras_manager *)(con + 1); | |
1815 | ||
1816 | amdgpu_ras_set_context(adev, con); | |
1817 | ||
5caf466a | 1818 | amdgpu_ras_check_supported(adev, &con->hw_supported, |
1819 | &con->supported); | |
fb2a3607 HZ |
1820 | if (!con->hw_supported) { |
1821 | amdgpu_ras_set_context(adev, NULL); | |
1822 | kfree(con); | |
1823 | return 0; | |
1824 | } | |
1825 | ||
c030f2e4 | 1826 | con->features = 0; |
1827 | INIT_LIST_HEAD(&con->head); | |
108c6a63 | 1828 | /* Might need to get this flag from vbios. */ |
1829 | con->flags = RAS_DEFAULT_FLAGS; | |
c030f2e4 | 1830 | |
4e644fff HZ |
1831 | if (adev->nbio.funcs->init_ras_controller_interrupt) { |
1832 | r = adev->nbio.funcs->init_ras_controller_interrupt(adev); | |
1833 | if (r) | |
1834 | return r; | |
1835 | } | |
1836 | ||
1837 | if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) { | |
1838 | r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev); | |
1839 | if (r) | |
1840 | return r; | |
1841 | } | |
1842 | ||
c030f2e4 | 1843 | amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK; |
1844 | ||
c030f2e4 | 1845 | if (amdgpu_ras_fs_init(adev)) |
1846 | goto fs_out; | |
1847 | ||
5d0f903f | 1848 | DRM_INFO("RAS INFO: ras initialized successfully, " |
1849 | "hardware ability[%x] ras_mask[%x]\n", | |
1850 | con->hw_supported, con->supported); | |
c030f2e4 | 1851 | return 0; |
1852 | fs_out: | |
c030f2e4 | 1853 | amdgpu_ras_set_context(adev, NULL); |
1854 | kfree(con); | |
1855 | ||
1856 | return -EINVAL; | |
1857 | } | |
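One detail worth calling out in the allocation above: the context and its per-block ras_manager objects come from a single kmalloc sized for both, and con->objs = (struct ras_manager *)(con + 1) simply points at the bytes immediately after the context struct. Here is a tiny standalone sketch of that single-allocation layout; the toy types are illustrative, not the driver's.

	#include <stdio.h>
	#include <stdlib.h>

	struct toy_obj { int block; };                            /* stands in for ras_manager */
	struct toy_ctx { int features; struct toy_obj *objs; };   /* stands in for amdgpu_ras  */

	#define TOY_BLOCK_COUNT 14

	int main(void)
	{
		/* one zeroed allocation: context header followed by the object array */
		struct toy_ctx *ctx = calloc(1, sizeof(*ctx) +
					     sizeof(struct toy_obj) * TOY_BLOCK_COUNT);
		if (!ctx)
			return 1;

		/* (ctx + 1) is the first byte past the header, i.e. objs[0] */
		ctx->objs = (struct toy_obj *)(ctx + 1);
		ctx->objs[2].block = 2;   /* e.g. the gfx manager */

		printf("objs starts %zu bytes after ctx\n",
		       (size_t)((char *)ctx->objs - (char *)ctx));
		free(ctx);
		return 0;
	}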
1858 | ||
b293e891 HZ |
1859 | /* helper function to handle common stuff in the ip late init phase */ | |
1860 | int amdgpu_ras_late_init(struct amdgpu_device *adev, | |
1861 | struct ras_common_if *ras_block, | |
1862 | struct ras_fs_if *fs_info, | |
1863 | struct ras_ih_if *ih_info) | |
1864 | { | |
1865 | int r; | |
1866 | ||
1867 | /* disable RAS feature per IP block if it is not supported */ | |
1868 | if (!amdgpu_ras_is_supported(adev, ras_block->block)) { | |
1869 | amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); | |
1870 | return 0; | |
1871 | } | |
1872 | ||
1873 | r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1); | |
1874 | if (r) { | |
1875 | if (r == -EAGAIN) { | |
1876 | /* request a gpu reset; this path will run again afterwards */ | |
1877 | amdgpu_ras_request_reset_on_boot(adev, | |
1878 | ras_block->block); | |
1879 | return 0; | |
1880 | } else if (adev->in_suspend || adev->in_gpu_reset) { | |
1881 | /* in resume phase, if we fail to enable ras, | |
1882 | * clean up all ras fs nodes and disable ras */ | |
1883 | goto cleanup; | |
1884 | } else | |
1885 | return r; | |
1886 | } | |
1887 | ||
1888 | /* in resume phase, no need to create ras fs node */ | |
1889 | if (adev->in_suspend || adev->in_gpu_reset) | |
1890 | return 0; | |
1891 | ||
1892 | if (ih_info->cb) { | |
1893 | r = amdgpu_ras_interrupt_add_handler(adev, ih_info); | |
1894 | if (r) | |
1895 | goto interrupt; | |
1896 | } | |
1897 | ||
b293e891 HZ |
1898 | r = amdgpu_ras_sysfs_create(adev, fs_info); |
1899 | if (r) | |
1900 | goto sysfs; | |
1901 | ||
1902 | return 0; | |
1903 | cleanup: | |
1904 | amdgpu_ras_sysfs_remove(adev, ras_block); | |
1905 | sysfs: | |
b293e891 HZ |
1906 | if (ih_info->cb) |
1907 | amdgpu_ras_interrupt_remove_handler(adev, ih_info); | |
1908 | interrupt: | |
1909 | amdgpu_ras_feature_enable(adev, ras_block, 0); | |
1910 | return r; | |
1911 | } | |
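A hedged sketch of how an IP block would typically consume the helper above from its own late-init path; the wrapper function and the descriptor objects are hypothetical placeholders for whatever ras_common_if / ras_fs_if / ras_ih_if instances the IP block has prepared elsewhere (block id, sysfs/debugfs names, interrupt callback).

	/* hypothetical IP-side wrapper, for illustration only */
	static int example_ip_ras_late_init(struct amdgpu_device *adev,
					    struct ras_common_if *ras_if,
					    struct ras_fs_if *fs_info,
					    struct ras_ih_if *ih_info)
	{
		int r;

		/* enables the feature (or schedules a reset on -EAGAIN), creates the
		 * fs nodes, and registers the interrupt handler when a cb is provided */
		r = amdgpu_ras_late_init(adev, ras_if, fs_info, ih_info);
		if (r)
			return r;

		/* from here the IP block can do its own ras-dependent setup */
		return 0;
	}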
1912 | ||
1913 | /* helper function to remove ras fs node and interrupt handler */ | |
1914 | void amdgpu_ras_late_fini(struct amdgpu_device *adev, | |
1915 | struct ras_common_if *ras_block, | |
1916 | struct ras_ih_if *ih_info) | |
1917 | { | |
1918 | if (!ras_block || !ih_info) | |
1919 | return; | |
1920 | ||
1921 | amdgpu_ras_sysfs_remove(adev, ras_block); | |
b293e891 HZ |
1922 | if (ih_info->cb) |
1923 | amdgpu_ras_interrupt_remove_handler(adev, ih_info); | |
1924 | amdgpu_ras_feature_enable(adev, ras_block, 0); | |
1925 | } | |
1926 | ||
a564808e | 1927 | /* do some init work after IP late init as a dependency. |
511fdbc3 | 1928 | * it runs in the resume/gpu reset/boot-up cases. |
a564808e | 1929 | */ |
511fdbc3 | 1930 | void amdgpu_ras_resume(struct amdgpu_device *adev) |
108c6a63 | 1931 | { |
1932 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1933 | struct ras_manager *obj, *tmp; | |
1934 | ||
1935 | if (!con) | |
1936 | return; | |
1937 | ||
108c6a63 | 1938 | if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { |
191051a1 | 1939 | /* Set up all other IPs which are not implemented. One tricky point: |
1940 | * each IP's actual ras error type should be MULTI_UNCORRECTABLE, | |
1941 | * but since the driver does not handle that type here, | |
1942 | * ERROR_NONE makes sense anyway. | |
1943 | */ | |
1944 | amdgpu_ras_enable_all_features(adev, 1); | |
1945 | ||
1946 | /* We enable ras on all hw_supported blocks, but a boot | |
1947 | * parameter might disable some of them, and one or more IPs may not | |
1948 | * have implemented ras yet. So we disable those on their behalf. | |
1949 | */ | |
108c6a63 | 1950 | list_for_each_entry_safe(obj, tmp, &con->head, node) { |
1951 | if (!amdgpu_ras_is_supported(adev, obj->head.block)) { | |
1952 | amdgpu_ras_feature_enable(adev, &obj->head, 0); | |
1953 | /* there should not be any remaining reference. */ | |
1954 | WARN_ON(alive_obj(obj)); | |
1955 | } | |
191051a1 | 1956 | } |
108c6a63 | 1957 | } |
a564808e | 1958 | |
1959 | if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) { | |
1960 | con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET; | |
1961 | /* set up the ras obj state as disabled | |
1962 | * for the init_by_vbios case. | |
1963 | * if we want to enable ras, just enable it in the normal way. | |
1964 | * If we want to disable it, we need to set up the ras obj as enabled, | |
1965 | * then issue another TA disable cmd. | |
1966 | * See feature_enable_on_boot | |
1967 | */ | |
1968 | amdgpu_ras_disable_all_features(adev, 1); | |
61934624 | 1969 | amdgpu_ras_reset_gpu(adev); |
a564808e | 1970 | } |
108c6a63 | 1971 | } |
1972 | ||
511fdbc3 | 1973 | void amdgpu_ras_suspend(struct amdgpu_device *adev) |
1974 | { | |
1975 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1976 | ||
1977 | if (!con) | |
1978 | return; | |
1979 | ||
1980 | amdgpu_ras_disable_all_features(adev, 0); | |
1981 | /* Make sure all ras objects are disabled. */ | |
1982 | if (con->features) | |
1983 | amdgpu_ras_disable_all_features(adev, 1); | |
1984 | } | |
1985 | ||
c030f2e4 | 1986 | /* do some fini work before IP fini as a dependency */ |
1987 | int amdgpu_ras_pre_fini(struct amdgpu_device *adev) | |
1988 | { | |
1989 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
1990 | ||
1991 | if (!con) | |
1992 | return 0; | |
1993 | ||
1994 | /* Need to disable ras on all IPs here before ip [hw/sw]fini */ | |
1995 | amdgpu_ras_disable_all_features(adev, 0); | |
1996 | amdgpu_ras_recovery_fini(adev); | |
1997 | return 0; | |
1998 | } | |
1999 | ||
2000 | int amdgpu_ras_fini(struct amdgpu_device *adev) | |
2001 | { | |
2002 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |
2003 | ||
2004 | if (!con) | |
2005 | return 0; | |
2006 | ||
2007 | amdgpu_ras_fs_fini(adev); | |
2008 | amdgpu_ras_interrupt_remove_all(adev); | |
2009 | ||
2010 | WARN(con->features, "Feature mask is not cleared"); | |
2011 | ||
2012 | if (con->features) | |
2013 | amdgpu_ras_disable_all_features(adev, 1); | |
2014 | ||
2015 | amdgpu_ras_set_context(adev, NULL); | |
2016 | kfree(con); | |
2017 | ||
2018 | return 0; | |
2019 | } | |
7c6e68c7 AG |
2020 | |
2021 | void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) | |
2022 | { | |
ed606f8a AG |
2023 | uint32_t hw_supported, supported; |
2024 | ||
2025 | amdgpu_ras_check_supported(adev, &hw_supported, &supported); | |
2026 | if (!hw_supported) | |
2027 | return; | |
2028 | ||
7c6e68c7 | 2029 | if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { |
d5ea093e AG |
2030 | DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n"); |
2031 | ||
61934624 | 2032 | amdgpu_ras_reset_gpu(adev); |
7c6e68c7 AG |
2033 | } |
2034 | } |
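The atomic_cmpxchg() above acts as a one-shot latch: it returns the previous value, so only the first fatal interrupt sees 0, flips the flag to 1 and triggers the reset, while any later interrupt falls through without queueing a second recovery. Below is a minimal sketch of the same pattern; the latch and function names are invented for illustration.

	#include <linux/atomic.h>

	static atomic_t example_fatal_latch = ATOMIC_INIT(0);   /* hypothetical latch */

	static void example_fatal_event(void)
	{
		/* atomic_cmpxchg() returns the old value: only the first caller sees 0 */
		if (atomic_cmpxchg(&example_fatal_latch, 0, 1) == 0) {
			/* first fatal event: kick off the heavyweight recovery once */
		}
		/* later events do nothing until the latch is cleared again */
	}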