/* drivers/gpu/drm/amd/amdgpu/amdgpu_device.c */
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"ALDEBARAN",
	"NAVI10",
	"CYAN_SKILLFISH",
	"NAVI14",
	"NAVI12",
	"SIENNA_CICHLID",
	"NAVY_FLOUNDER",
	"VANGOGH",
	"DIMGREY_CAVEFISH",
	"BEIGE_GOBY",
	"YELLOW_CARP",
	"IP DISCOVERY",
	"LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as the sum of the NAKs generated and NAKs received.
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
		amdgpu_device_get_pcie_replay_count, NULL);

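/*
 * Illustrative userspace sketch (not part of the driver): the attribute
 * reads like any other sysfs file. The card index below is an assumption
 * and depends on the system; needs fcntl.h, unistd.h and stdio.h.
 *
 *	char buf[32] = {0};
 *	int fd = open("/sys/class/drm/card0/device/pcie_replay_count", O_RDONLY);
 *
 *	if (fd >= 0 && read(fd, buf, sizeof(buf) - 1) > 0)
 *		printf("replays: %s", buf);
 */
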
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * DOC: product_name
 *
 * The amdgpu driver provides a sysfs API for reporting the product name
 * for the device.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_name(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->product_name);
}

static DEVICE_ATTR(product_name, S_IRUGO,
		amdgpu_device_get_product_name, NULL);

/**
 * DOC: product_number
 *
 * The amdgpu driver provides a sysfs API for reporting the part number
 * for the device.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_number(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->product_number);
}

static DEVICE_ATTR(product_number, S_IRUGO,
		amdgpu_device_get_product_number, NULL);

/**
 * DOC: serial_number
 *
 * The amdgpu driver provides a sysfs API for reporting the serial number
 * for the device.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_serial_number(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->serial);
}

static DEVICE_ATTR(serial_number, S_IRUGO,
		amdgpu_device_get_serial_number, NULL);

/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise return false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device dGPU with
 * smart shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
	return (amdgpu_device_supports_boco(dev) &&
		amdgpu_acpi_is_power_shift_control_supported());
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the size of @buf must be at least @size
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}

	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}

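/*
 * Illustrative sketch (caller context assumed): pull one dword out of
 * VRAM through the MM_INDEX/MM_DATA window without mapping anything;
 * vram_offset is a hypothetical dword-aligned offset.
 *
 *	uint32_t val;
 *
 *	amdgpu_device_mm_access(adev, vram_offset, &val, sizeof(val), false);
 */
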
/**
 * amdgpu_device_aper_access - access vram via the vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the size of @buf must be at least @size
 * @write: true - write to vram, otherwise - read from vram
 *
 * The return value means how many bytes have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			mb();
			memcpy_fromio(buf, addr, count);
		}

	}

	return count;
#else
	return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the size of @buf must be at least @size
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try to use the vram aperture to access vram first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* use MM to access the rest of vram */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}

/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_sem))
			up_read(&adev->reset_sem);
		else
			lockdep_assert_held(&adev->reset_sem);
	}
#endif
	return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_sem)) {
			ret = amdgpu_kiq_rreg(adev, reg);
			up_read(&adev->reset_sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/*
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_sem)) {
			amdgpu_kiq_wreg(adev, reg, v);
			up_read(&adev->reset_sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

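/*
 * Read-modify-write sketch built on the two helpers above (illustrative;
 * in-tree callers normally go through wrapper macros such as RREG32()
 * and WREG32(), and SOME_ENABLE_BIT below is a made-up field):
 *
 *	uint32_t tmp = amdgpu_device_rreg(adev, reg, 0);
 *
 *	tmp |= SOME_ENABLE_BIT;
 *	amdgpu_device_wreg(adev, reg, tmp, 0);
 */
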
/*
 * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
 *
 * This function is invoked only for the debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

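/*
 * Sketch (illustrative, assumes a struct amdgpu_ring context with
 * doorbell_index/wptr members): a ring would typically publish its new
 * write pointer to the hardware through these helpers, e.g.
 *
 *	amdgpu_mm_wdoorbell64(adev, ring->doorbell_index, ring->wptr);
 */
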
/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 pcie_index, u32 pcie_data,
				u32 reg_addr)
{
	unsigned long flags;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 pcie_index, u32 pcie_data,
				  u32 reg_addr)
{
	unsigned long flags;
	u64 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 pcie_index, u32 pcie_data,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 pcie_index, u32 pcie_data,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

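/*
 * Illustrative sketch: an asic's pcie_rreg callback typically funnels
 * into these helpers with its own index/data register pair. The register
 * names below are assumptions for the example:
 *
 *	static u32 my_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *	{
 *		return amdgpu_device_indirect_rreg(adev, mmPCIE_INDEX2,
 *						   mmPCIE_DATA2, reg);
 *	}
 */
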
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	amdgpu_asic_pre_asic_init(adev);

	return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

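/*
 * Sketch of a golden-register table consumed by the helper above; the
 * register name and values are illustrative only. Each triplet is
 * {register, and_mask, or_mask}: the and_mask selects the bits to
 * replace, the or_mask supplies their new values.
 *
 *	static const u32 golden_settings[] = {
 *		mmSOME_REG, 0x0000000f, 0x00000001,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_settings,
 *						ARRAY_SIZE(golden_settings));
 */
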
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}

/*
 * GPU doorbell aperture helpers function.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{

	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	amdgpu_asic_init_doorbell_index(adev);

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     adev->doorbell_index.max_assignment+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	/* For Vega, reserve and map two pages on doorbell BAR since SDMA
	 * paging queue doorbell use the second page. The
	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
	 * doorbells are in the first page. So with paging queue enabled,
	 * the max num_doorbells should be increased by one page (0x400 in dwords).
	 */
	if (adev->asic_type >= CHIP_VEGA10)
		adev->doorbell.num_doorbells += 0x400;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}


/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

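/*
 * Typical lifecycle sketch (illustrative): grab a writeback slot, point
 * the hardware at it, then release it. The returned index is a dword
 * offset, so the CPU view is adev->wb.wb[wb] and the GPU address is
 * adev->wb.gpu_addr + wb * 4.
 *
 *	u32 wb;
 *	int r = amdgpu_device_wb_get(adev, &wb);
 *
 *	if (r)
 *		return r;
 *	// ... hand adev->wb.gpu_addr + wb * 4 to the engine ...
 *	amdgpu_device_wb_free(adev, wb);
 */
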
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
			rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helpers function.
 */
/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if need or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw versions still need the driver to do vPost, otherwise
		 * the gpu hangs. SMC fw versions above 22.15 don't have this flaw, so we
		 * force vPost to be executed for smc versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset whole hive on init */
	if (adev->gmc.xgmi.pending_reset)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
		bool state)
{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

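/*
 * Worked example: a 4KB page leaves a 12-bit in-page offset, so the
 * minimum block size of 9 makes one page-table block cover
 * 2^(12 + 9) bytes = 2MB of address space; larger values move bits
 * from the page directory into the page table.
 */
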
/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

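/*
 * The shift above expresses the pool size in 256MB units: e.g. a module
 * parameter of 1 yields a 1 << 28 = 0x10000000-byte (256MB) SMU pool,
 * and 8 yields 2GB.
 */
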
static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
	if (!(adev->flags & AMD_IS_APU) ||
	    adev->asic_type < CHIP_RAVEN)
		return 0;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		break;
	case CHIP_RENOIR:
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case CHIP_YELLOW_CARP:
		break;
	case CHIP_CYAN_SKILLFISH:
		if (adev->pdev->device == 0x13FE)
			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	if (amdgpu_sched_hw_submission < 2) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = 2;
	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_gmc_tmz_set(adev);

	amdgpu_gmc_noretry_set(adev);

	return 0;
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		r = pci_enable_device(pdev);
		if (r)
			DRM_WARN("pci_enable_device failed (%d)\n", r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

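/*
 * Illustrative call (the enum values are the usual ones from
 * amd_shared.h): gate GFX clocks on the whole device.
 *
 *	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_CG_STATE_GATE);
 */
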
/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;

}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;

}

/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block's version is equal to or greater than
 * @major.@minor, or 1 if it is smaller or the ip_block doesn't exist.
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

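/*
 * Example (illustrative sketch, not part of the original file): gating a
 * hypothetical workaround on a minimum GFX IP version; a return of 0 means
 * the block is at least the requested version.
 *
 *	bool needs_legacy_wa =
 *		amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						   8, 0) != 0;
 */
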
/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	switch (ip_block_version->type) {
	case AMD_IP_BLOCK_TYPE_VCN:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
			return 0;
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
			return 0;
		break;
	default:
		break;
	}

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

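/*
 * Example (illustrative sketch, not part of the original file): how an ASIC
 * init path typically registers an IP block. The foo_* names are
 * hypothetical placeholders for a real block's amd_ip_funcs table.
 *
 *	static const struct amdgpu_ip_block_version foo_ip_block = {
 *		.type = AMD_IP_BLOCK_TYPE_COMMON,
 *		.major = 1,
 *		.minor = 0,
 *		.rev = 0,
 *		.funcs = &foo_ip_funcs,
 *	};
 *
 *	r = amdgpu_device_ip_block_add(adev, &foo_ip_block);
 */
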
/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		const char *pci_address_name = pci_name(adev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

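/*
 * Example (illustrative sketch, not part of the original file): per the
 * parsing above, the virtual_display string is a semicolon-separated list of
 * PCI addresses, each optionally followed by a comma and a CRTC count
 * (clamped to 1..6), with "all" matching every device:
 *
 *	modprobe amdgpu virtual_display="0000:01:00.0,2;0000:02:00.0"
 *	modprobe amdgpu virtual_display=all,4
 */
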
/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[40];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	if (adev->mman.discovery_bin) {
		amdgpu_discovery_get_gfx_info(adev);

		/*
		 * FIXME: The bounding box is still needed by Navi12, so
		 * temporarily read it from gpu_info firmware. Should be dropped
		 * when DAL no longer needs it.
		 */
		if (adev->asic_type != CHIP_NAVI12)
			return 0;
	}

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_VEGA20:
	case CHIP_ALDEBARAN:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			chip_name = "raven2";
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_RENOIR:
		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			chip_name = "renoir";
		else
			chip_name = "green_sardine";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	case CHIP_VANGOGH:
		chip_name = "vangogh";
		break;
	case CHIP_YELLOW_CARP:
		chip_name = "yellow_carp";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		/*
		 * Should be dropped when DAL no longer needs it.
		 */
		if (adev->asic_type == CHIP_NAVI12)
			goto parse_soc_bounding_box;

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		if (hdr->version_minor >= 1) {
			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->gfx.config.num_sc_per_sh =
				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
			adev->gfx.config.num_packer_per_sc =
				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
		}

parse_soc_bounding_box:
		/*
		 * The soc bounding box info is not integrated in the discovery
		 * table, so we always need to parse it from the gpu info
		 * firmware if needed.
		 */
		if (hdr->version_minor == 2) {
			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
		}
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}

/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run. This
 * is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if (adev->flags & AMD_IS_APU)
			adev->family = AMDGPU_FAMILY_KV;
		else
			adev->family = AMDGPU_FAMILY_CI;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->flags & AMD_IS_APU)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		r = amdgpu_discovery_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	}

	amdgpu_amdkfd_device_probe(adev);

	adev->pm.pp_feature = amdgpu_pp_feature_mask;
	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
		/* get the vbios after the asic_funcs are set up */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
			r = amdgpu_device_parse_gpu_info_fw(adev);
			if (r)
				return r;

			/* Read BIOS */
			if (!amdgpu_get_bios(adev))
				return -EINVAL;

			r = amdgpu_atombios_init(adev);
			if (r) {
				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
				return r;
			}

			/* get pf2vf msg info at its earliest time */
			if (amdgpu_sriov_vf(adev))
				amdgpu_virt_init_data_exchange(adev);

		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

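/*
 * Example (illustrative sketch, not part of the original file): the
 * ip_block_mask module parameter checked above keeps bit i set for each IP
 * block index i that should stay enabled, so clearing bit 1 disables the
 * second block registered for the ASIC:
 *
 *	modprobe amdgpu ip_block_mask=0xfffffffd
 */
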
static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
			if (r) {
				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;
		}
	}

	return 0;
}

static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
{
	int r = 0;
	int i;
	uint32_t smu_version;

	if (adev->asic_type >= CHIP_VEGA10) {
		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
				continue;

			if (!adev->ip_blocks[i].status.sw)
				continue;

			/* no need to do the fw loading again if already done */
			if (adev->ip_blocks[i].status.hw == true)
				break;

			if (amdgpu_in_reset(adev) || adev->in_suspend) {
				r = adev->ip_blocks[i].version->funcs->resume(adev);
				if (r) {
					DRM_ERROR("resume of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				}
			} else {
				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
				if (r) {
					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				}
			}

			adev->ip_blocks[i].status.hw = true;
			break;
		}
	}

	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);

	return r;
}

/**
 * amdgpu_device_ip_init - run init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
 * are run. sw_init initializes the software state associated with each IP
 * and hw_init initializes the hardware associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
	int i, r;

	r = amdgpu_ras_init(adev);
	if (r)
		return r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			goto init_failed;
		}
		adev->ip_blocks[i].status.sw = true;

		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_device_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				goto init_failed;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				goto init_failed;
			}
			r = amdgpu_device_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
				goto init_failed;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
							       AMDGPU_GEM_DOMAIN_VRAM,
							       AMDGPU_CSA_SIZE);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					goto init_failed;
				}
			}
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_init_data_exchange(adev);

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
		goto init_failed;
	}

	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase1(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase2(adev);
	if (r)
		goto init_failed;

	/*
	 * retired pages will be loaded from eeprom and reserved here,
	 * it should be called after amdgpu_device_ip_hw_init_phase2 since
	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
	 * functional for I2C communication, which is only true at this point.
	 *
	 * amdgpu_ras_recovery_init may fail, but the upper layer only cares
	 * about the failure from a bad gpu situation and stops the amdgpu
	 * init process accordingly. For other failed cases, it will still
	 * release all the resources and print an error message, rather than
	 * returning a negative value to the upper level.
	 *
	 * Note: theoretically, this should be called before all vram allocations
	 * to protect retired pages from abuse
	 */
	r = amdgpu_ras_recovery_init(adev);
	if (r)
		goto init_failed;

	if (adev->gmc.xgmi.num_physical_nodes > 1)
		amdgpu_xgmi_add_device(adev);

	/* Don't init kfd if whole hive need to be reset during init */
	if (!adev->gmc.xgmi.pending_reset)
		amdgpu_amdkfd_device_init(adev);

	amdgpu_fru_get_product_info(adev);

init_failed:
	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	return r;
}

/**
 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
 *
 * @adev: amdgpu_device pointer
 *
 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
 * this function before a GPU reset. If the value is retained after a
 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
 */
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

/**
 * amdgpu_device_check_vram_lost - check if vram is valid
 *
 * @adev: amdgpu_device pointer
 *
 * Checks the reset magic value written to the gart pointer in VRAM.
 * The driver calls this after a GPU reset to see if the contents of
 * VRAM are lost or not.
 * Returns true if vram is lost, false if not.
 */
static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	if (memcmp(adev->gart.ptr, adev->reset_magic,
		   AMDGPU_RESET_MAGIC_NUM))
		return true;

	if (!amdgpu_in_reset(adev))
		return false;

	/*
	 * For all ASICs with baco/mode1 reset, the VRAM is
	 * always assumed to be lost.
	 */
	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_BACO:
	case AMD_RESET_METHOD_MODE1:
		return true;
	default:
		return false;
	}
}

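/*
 * Example (illustrative sketch, not part of the original file): how a reset
 * path pairs these two helpers, writing the magic before the reset and
 * comparing it afterwards to decide whether VRAM contents survived.
 *
 *	amdgpu_device_fill_reset_magic(adev);
 *	r = amdgpu_asic_reset(adev);
 *	...
 *	vram_lost = amdgpu_device_check_vram_lost(adev);
 *	if (vram_lost)
 *		DRM_INFO("VRAM is lost due to GPU reset!\n");
 */
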
/**
 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
 *
 * @adev: amdgpu_device pointer
 * @state: clockgating state (gate or ungate)
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * set_clockgating_state callbacks are run.
 * The late initialization pass enables clockgating for hardware IPs;
 * the fini and suspend passes disable clockgating for hardware IPs.
 * Returns 0 on success, negative error code on failure.
 */
int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
			       enum amd_clockgating_state state)
{
	int i, j, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (j = 0; j < adev->num_ip_blocks; j++) {
		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		/* skip CG for GFX on S0ix */
		if (adev->in_s0ix &&
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     state);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
			       enum amd_powergating_state state)
{
	int i, j, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (j = 0; j < adev->num_ip_blocks; j++) {
		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		/* skip PG for GFX on S0ix */
		if (adev->in_s0ix &&
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
			continue;
		/* skip PG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
			/* enable powergating to save power */
			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
										     state);
			if (r) {
				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}

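/*
 * Example (illustrative sketch, not part of the original file): gating walks
 * the IP list front to back while ungating walks it back to front, so the
 * suspend path undoes what late init did in reverse order:
 *
 *	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
 *	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 */
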
static int amdgpu_device_enable_mgpu_fan_boost(void)
{
	struct amdgpu_gpu_instance *gpu_ins;
	struct amdgpu_device *adev;
	int i, ret = 0;

	mutex_lock(&mgpu_info.mutex);

	/*
	 * MGPU fan boost feature should be enabled
	 * only when there are two or more dGPUs in
	 * the system
	 */
	if (mgpu_info.num_dgpu < 2)
		goto out;

	for (i = 0; i < mgpu_info.num_dgpu; i++) {
		gpu_ins = &(mgpu_info.gpu_ins[i]);
		adev = gpu_ins->adev;
		if (!(adev->flags & AMD_IS_APU) &&
		    !gpu_ins->mgpu_fan_enabled) {
			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
			if (ret)
				break;

			gpu_ins->mgpu_fan_enabled = 1;
		}
	}

out:
	mutex_unlock(&mgpu_info.mutex);

	return ret;
}

/**
 * amdgpu_device_ip_late_init - run late init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Late initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the late_init callbacks are run.
 * late_init covers any special initialization that an IP requires
 * after all of the IPs have been initialized or anything that needs to
 * happen late in the init process.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
		adev->ip_blocks[i].status.late_initialized = true;
	}

	amdgpu_ras_set_error_query_ready(adev, true);

	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);

	amdgpu_device_fill_reset_magic(adev);

	r = amdgpu_device_enable_mgpu_fan_boost();
	if (r)
		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);

	/* For XGMI + passthrough configuration on arcturus, enable light SBR */
	if (adev->asic_type == CHIP_ARCTURUS &&
	    amdgpu_passthrough(adev) &&
	    adev->gmc.xgmi.num_physical_nodes > 1)
		smu_set_light_sbr(&adev->smu, true);

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		mutex_lock(&mgpu_info.mutex);

		/*
		 * Reset device p-state to low as this was booted with high.
		 *
		 * This should be performed only after all devices from the same
		 * hive get initialized.
		 *
		 * However, the number of devices in a hive is not known in
		 * advance, as devices are counted one by one during
		 * initialization.
		 *
		 * So, we wait for all XGMI interlinked devices to be
		 * initialized. This may bring some delays as those devices may
		 * come from different hives, but that should be OK.
		 */
		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
			for (i = 0; i < mgpu_info.num_gpu; i++) {
				gpu_instance = &(mgpu_info.gpu_ins[i]);
				if (gpu_instance->adev->flags & AMD_IS_APU)
					continue;

				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
							   AMDGPU_XGMI_PSTATE_MIN);
				if (r) {
					DRM_ERROR("pstate setting failed (%d).\n", r);
					break;
				}
			}
		}

		mutex_unlock(&mgpu_info.mutex);
	}

	return 0;
}

static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].version->funcs->early_fini)
			continue;

		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
		if (r) {
			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	amdgpu_amdkfd_suspend(adev, false);

	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);

	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	if (amdgpu_sriov_vf(adev)) {
		if (amdgpu_virt_release_full_gpu(adev, false))
			DRM_ERROR("failed to release exclusive mode on fini\n");
	}

	return 0;
}

/**
 * amdgpu_device_ip_fini - run fini for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main teardown pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
 * are run. hw_fini tears down the hardware associated with each IP
 * and sw_fini tears down any software state associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
		amdgpu_virt_release_ras_err_handler_data(adev);

	if (adev->gmc.xgmi.num_physical_nodes > 1)
		amdgpu_xgmi_remove_device(adev);

	amdgpu_amdkfd_device_fini_sw(adev);

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_ucode_free_bo(adev);
			amdgpu_free_static_csa(&adev->virt.csa_obj);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_vram_scratch_fini(adev);
			amdgpu_ib_pool_fini(adev);
		}

		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	amdgpu_ras_fini(adev);

	return 0;
}

/**
 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
 *
 * @work: work_struct.
 */
static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, delayed_init_work.work);
	int r;

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);
}

static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);

	WARN_ON_ONCE(adev->gfx.gfx_off_state);
	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);

	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
		adev->gfx.gfx_off_state = true;
}

/**
 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run. suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		/* displays are handled separately */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
			continue;

		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}

		adev->ip_blocks[i].status.hw = false;
	}

	return 0;
}

/**
 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run. suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
	int i, r;

	if (adev->in_s0ix)
		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* displays are handled in phase1 */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
			continue;
		/* PSP lost connection when err_event_athub occurs */
		if (amdgpu_ras_intr_triggered() &&
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
			adev->ip_blocks[i].status.hw = false;
			continue;
		}

		/* skip unnecessary suspend if we do not initialize them yet */
		if (adev->gmc.xgmi.pending_reset &&
		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
			adev->ip_blocks[i].status.hw = false;
			continue;
		}

		/* skip suspend of gfx and psp for S0ix
		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
		 * like at runtime. PSP is also part of the always on hardware
		 * so no need to suspend it.
		 */
		if (adev->in_s0ix &&
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
			continue;

		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.hw = false;
		/* handle putting the SMC in the appropriate state */
		if (!amdgpu_sriov_vf(adev)) {
			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
				if (r) {
					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
						  adev->mp1_state, r);
					return r;
				}
			}
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_suspend - run suspend for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run. suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_fini_data_exchange(adev);
		amdgpu_virt_request_full_gpu(adev, false);
	}

	r = amdgpu_device_ip_suspend_phase1(adev);
	if (r)
		return r;
	r = amdgpu_device_ip_suspend_phase2(adev);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return r;
}

static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_PSP,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < adev->num_ip_blocks; i++) {
		int j;
		struct amdgpu_ip_block *block;

		block = &adev->ip_blocks[i];
		block->status.hw = false;

		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {

			if (block->version->type != ip_order[j] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
			if (r)
				return r;
			block->status.hw = true;
		}
	}

	return 0;
}

static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_UVD,
		AMD_IP_BLOCK_TYPE_VCE,
		AMD_IP_BLOCK_TYPE_VCN
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid ||
			    block->status.hw)
				continue;

			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
				r = block->version->funcs->resume(adev);
			else
				r = block->version->funcs->hw_init(adev);

			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
			if (r)
				return r;
			block->status.hw = true;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * First resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * COMMON, GMC, and IH. resume puts the hardware into a functional state
 * after a suspend and updates the software state as necessary. This
 * function is also used for restoring the GPU after a GPU reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {

			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Second resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
 * functional state after a suspend and updates the software state as
 * necessary. This function is also used for restoring the GPU after a GPU
 * reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main resume function for hardware IPs. The hardware IPs
 * are split into two resume functions because they are
 * also used in recovering from a GPU reset and some additional
 * steps need to be taken between them. In this case (S3/S4) they are
 * run sequentially.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_amdkfd_resume_iommu(adev);
	if (r)
		return r;

	r = amdgpu_device_ip_resume_phase1(adev);
	if (r)
		return r;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		return r;

	r = amdgpu_device_ip_resume_phase2(adev);

	return r;
}

/**
 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Query the VBIOS data tables to determine if the board supports SR-IOV.
 */
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		if (adev->is_atom_fw) {
			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		} else {
			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		}

		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
	}
}

/**
 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
 *
 * @asic_type: AMD asic type
 *
 * Check if there is DC (new modesetting infrastructure) support for an asic.
 * returns true if DC has support, false if not.
 */
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
	switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		/*
		 * We have systems in the wild with these ASICs that require
		 * LVDS and VGA support which is not supported with DC.
		 *
		 * Fallback to the non-DC driver here by default so as not to
		 * cause regressions.
		 */
#if defined(CONFIG_DRM_AMD_DC_SI)
		return amdgpu_dc > 0;
#else
		return false;
#endif
	case CHIP_BONAIRE:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		/*
		 * We have systems in the wild with these ASICs that require
		 * LVDS and VGA support which is not supported with DC.
		 *
		 * Fallback to the non-DC driver here by default so as not to
		 * cause regressions.
		 */
		return amdgpu_dc > 0;
	case CHIP_HAWAII:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_RENOIR:
	case CHIP_CYAN_SKILLFISH:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
#endif
	default:
		return amdgpu_dc != 0;
#else
	default:
		if (amdgpu_dc > 0)
			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
				      "but isn't supported by ASIC, ignoring\n");
		return false;
#endif
	}
}

/**
 * amdgpu_device_has_dc_support - check if dc is supported
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true for supported, false for not supported
 */
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev) ||
	    adev->enable_virtual_display ||
	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
		return false;

	return amdgpu_device_asic_has_dc_support(adev->asic_type);
}

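/*
 * Example (illustrative sketch, not part of the original file): the amdgpu.dc
 * module parameter drives the checks above; a positive value opts legacy
 * ASICs such as SI/CIK into DC, while 0 disables DC on ASICs that default
 * to it:
 *
 *	modprobe amdgpu dc=1
 *	modprobe amdgpu dc=0
 */
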
static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
{
	struct amdgpu_device *adev =
		container_of(__work, struct amdgpu_device, xgmi_reset_work);
	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);

	/* It's a bug to not have a hive within this function */
	if (WARN_ON(!hive))
		return;

	/*
	 * Use task barrier to synchronize all xgmi reset works across the
	 * hive. task_barrier_enter and task_barrier_exit will block
	 * until all the threads running the xgmi reset works reach
	 * those points. task_barrier_full will do both blocks.
	 */
	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {

		task_barrier_enter(&hive->tb);
		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));

		if (adev->asic_reset_res)
			goto fail;

		task_barrier_exit(&hive->tb);
		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));

		if (adev->asic_reset_res)
			goto fail;

		if (adev->mmhub.ras_funcs &&
		    adev->mmhub.ras_funcs->reset_ras_error_count)
			adev->mmhub.ras_funcs->reset_ras_error_count(adev);
	} else {

		task_barrier_full(&hive->tb);
		adev->asic_reset_res = amdgpu_asic_reset(adev);
	}

fail:
	if (adev->asic_reset_res)
		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
			 adev->asic_reset_res, adev_to_drm(adev)->unique);
	amdgpu_put_xgmi_hive(hive);
}

static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
{
	char *input = amdgpu_lockup_timeout;
	char *timeout_setting = NULL;
	int index = 0;
	long timeout;
	int ret = 0;

	/*
	 * By default the timeout for non-compute jobs is 10000
	 * and 60000 for compute jobs.
	 * In SR-IOV or passthrough mode, the timeout for compute
	 * jobs is 60000 by default.
	 */
	adev->gfx_timeout = msecs_to_jiffies(10000);
	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
	if (amdgpu_sriov_vf(adev))
		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
	else
		adev->compute_timeout = msecs_to_jiffies(60000);

	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
		while ((timeout_setting = strsep(&input, ",")) &&
		       strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
			ret = kstrtol(timeout_setting, 0, &timeout);
			if (ret)
				return ret;

			if (timeout == 0) {
				index++;
				continue;
			} else if (timeout < 0) {
				timeout = MAX_SCHEDULE_TIMEOUT;
				dev_warn(adev->dev, "lockup timeout disabled");
				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
			} else {
				timeout = msecs_to_jiffies(timeout);
			}

			switch (index++) {
			case 0:
				adev->gfx_timeout = timeout;
				break;
			case 1:
				adev->compute_timeout = timeout;
				break;
			case 2:
				adev->sdma_timeout = timeout;
				break;
			case 3:
				adev->video_timeout = timeout;
				break;
			default:
				break;
			}
		}
		/*
		 * There is only one value specified and
		 * it should apply to all non-compute jobs.
		 */
		if (index == 1) {
			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
				adev->compute_timeout = adev->gfx_timeout;
		}
	}

	return ret;
}

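/*
 * Example (illustrative sketch, not part of the original file): per the
 * parsing above, lockup_timeout takes up to four comma-separated values in
 * milliseconds, in gfx, compute, sdma, video order; 0 keeps the default and
 * a negative value disables the timeout. A single value applies to all
 * non-compute jobs:
 *
 *	modprobe amdgpu lockup_timeout=10000,60000,10000,10000
 *	modprobe amdgpu lockup_timeout=5000
 */
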
static const struct attribute *amdgpu_dev_attributes[] = {
	&dev_attr_product_name.attr,
	&dev_attr_product_number.attr,
	&dev_attr_serial_number.attr,
	&dev_attr_pcie_replay_count.attr,
	NULL
};

d38ceaf9
AD
3376/**
3377 * amdgpu_device_init - initialize the driver
3378 *
3379 * @adev: amdgpu_device pointer
d38ceaf9
AD
3380 * @flags: driver flags
3381 *
3382 * Initializes the driver info and hw (all asics).
3383 * Returns 0 for success or an error on failure.
3384 * Called at driver startup.
3385 */
3386int amdgpu_device_init(struct amdgpu_device *adev,
d38ceaf9
AD
3387 uint32_t flags)
3388{
8aba21b7
LT
3389 struct drm_device *ddev = adev_to_drm(adev);
3390 struct pci_dev *pdev = adev->pdev;
d38ceaf9 3391 int r, i;
b98c6299 3392 bool px = false;
95844d20 3393 u32 max_MBps;
d38ceaf9
AD
3394
3395 adev->shutdown = false;
d38ceaf9 3396 adev->flags = flags;
4e66d7d2
YZ
3397
3398 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3399 adev->asic_type = amdgpu_force_asic_type;
3400 else
3401 adev->asic_type = flags & AMD_ASIC_MASK;
3402
d38ceaf9 3403 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
593aa2d2 3404 if (amdgpu_emu_mode == 1)
8bdab6bb 3405 adev->usec_timeout *= 10;
770d13b1 3406 adev->gmc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
3407 adev->accel_working = false;
3408 adev->num_rings = 0;
3409 adev->mman.buffer_funcs = NULL;
3410 adev->mman.buffer_funcs_ring = NULL;
3411 adev->vm_manager.vm_pte_funcs = NULL;
0c88b430 3412 adev->vm_manager.vm_pte_num_scheds = 0;
132f34e4 3413 adev->gmc.gmc_funcs = NULL;
7bd939d0 3414 adev->harvest_ip_mask = 0x0;
f54d1867 3415 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 3416 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
3417
3418 adev->smc_rreg = &amdgpu_invalid_rreg;
3419 adev->smc_wreg = &amdgpu_invalid_wreg;
3420 adev->pcie_rreg = &amdgpu_invalid_rreg;
3421 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
3422 adev->pciep_rreg = &amdgpu_invalid_rreg;
3423 adev->pciep_wreg = &amdgpu_invalid_wreg;
4fa1c6a6
TZ
3424 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3425 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
d38ceaf9
AD
3426 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3427 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3428 adev->didt_rreg = &amdgpu_invalid_rreg;
3429 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
3430 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3431 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
3432 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3433 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3434
3e39ab90
AD
3435 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3436 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3437 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
3438
3439 /* mutex initializations are all done here so we
3440 * can call these functions again later without locking issues */
0e5ca0d1 3441 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
3442 mutex_init(&adev->pm.mutex);
3443 mutex_init(&adev->gfx.gpu_clock_mutex);
3444 mutex_init(&adev->srbm_mutex);
b8866c26 3445 mutex_init(&adev->gfx.pipe_reserve_mutex);
d23ee13f 3446 mutex_init(&adev->gfx.gfx_off_mutex);
d38ceaf9 3447 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 3448 mutex_init(&adev->mn_lock);
e23b74aa 3449 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9 3450 hash_init(adev->mn_hash);
53b3f8f4 3451 atomic_set(&adev->in_gpu_reset, 0);
6049db43 3452 init_rwsem(&adev->reset_sem);
32eaeae0 3453 mutex_init(&adev->psp.mutex);
bd052211 3454 mutex_init(&adev->notifier_lock);
d38ceaf9 3455
9f6a7857
HR
3456 r = amdgpu_device_init_apu_flags(adev);
3457 if (r)
3458 return r;
3459
912dfc84
EQ
3460 r = amdgpu_device_check_arguments(adev);
3461 if (r)
3462 return r;
d38ceaf9 3463
d38ceaf9
AD
3464 spin_lock_init(&adev->mmio_idx_lock);
3465 spin_lock_init(&adev->smc_idx_lock);
3466 spin_lock_init(&adev->pcie_idx_lock);
3467 spin_lock_init(&adev->uvd_ctx_idx_lock);
3468 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 3469 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 3470 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 3471 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 3472 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 3473
0c4e7fa5
CZ
3474 INIT_LIST_HEAD(&adev->shadow_list);
3475 mutex_init(&adev->shadow_list_lock);
3476
655ce9cb 3477 INIT_LIST_HEAD(&adev->reset_list);
3478
beff74bc
AD
3479 INIT_DELAYED_WORK(&adev->delayed_init_work,
3480 amdgpu_device_delayed_init_work_handler);
1e317b99
RZ
3481 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3482 amdgpu_device_delay_enable_gfx_off);
2dc80b00 3483
d4535e2c
AG
3484 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3485
d23ee13f 3486 adev->gfx.gfx_off_req_count = 1;
b6e79d9a 3487 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
b1ddf548 3488
b265bdbd
EQ
3489 atomic_set(&adev->throttling_logging_enabled, 1);
3490 /*
3491 * If throttling continues, logging will be performed every minute
3492 * to avoid log flooding. "-1" is subtracted since the thermal
3493 * throttling interrupt comes every second. Thus, the total logging
3494 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3495 * for throttling interrupt) = 60 seconds.
3496 */
3497 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3498 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3499
0fa49558
AX
3500 /* Register mapping */
3501 /* TODO: block userspace mapping of I/O registers */
da69c161
KW
3502 if (adev->asic_type >= CHIP_BONAIRE) {
3503 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3504 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3505 } else {
3506 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3507 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3508 }
d38ceaf9 3509
6c08e0ef
EQ
3510 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3511 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3512
d38ceaf9
AD
3513 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3514 if (adev->rmmio == NULL) {
3515 return -ENOMEM;
3516 }
3517 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3518 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3519
5494d864
AD
3520 amdgpu_device_get_pcie_info(adev);
3521
b239c017
JX
3522 if (amdgpu_mcbp)
3523 DRM_INFO("MCBP is enabled\n");
3524
5f84cc63
JX
3525 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3526 adev->enable_mes = true;
3527
3aa0115d
ML
3528 /* detect hw virtualization here */
3529 amdgpu_detect_virtualization(adev);
3530
dffa11b4
ML
3531 r = amdgpu_device_get_job_timeout_settings(adev);
3532 if (r) {
3533 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
4ef87d8f 3534 return r;
a190d1c7
XY
3535 }
3536
d38ceaf9 3537 /* early init functions */
06ec9070 3538 r = amdgpu_device_ip_early_init(adev);
d38ceaf9 3539 if (r)
4ef87d8f 3540 return r;
d38ceaf9 3541
8e6d0b69 3542 /* enable PCIE atomic ops */
3543 if (amdgpu_sriov_vf(adev))
3544 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3545 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags ==
3546 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3547 else
3548 adev->have_atomics_support =
3549 !pci_enable_atomic_ops_to_root(adev->pdev,
3550 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3551 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3552 if (!adev->have_atomics_support)
3553 dev_info(adev->dev, "PCIE atomic ops are not supported\n");
3554
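/*
 * Editorial note (an assumption based on other amdgpu/amdkfd code, not
 * on this file): have_atomics_support is consumed e.g. by KFD, which on
 * some dGPUs refuses to enable user-mode queues without PCIe atomics.
 */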
6585661d
OZ
3555 /* doorbell bar mapping and doorbell index init */
3556 amdgpu_device_doorbell_init(adev);
3557
9475a943
SL
3558 if (amdgpu_emu_mode == 1) {
3559 /* post the asic in emulation mode */
3560 emu_soc_asic_init(adev);
bfca0289 3561 goto fence_driver_init;
9475a943 3562 }
bfca0289 3563
04442bf7
LL
3564 amdgpu_reset_init(adev);
3565
4e99a44e
ML
3566 /* detect if we are with an SRIOV vbios */
3567 amdgpu_device_detect_sriov_bios(adev);
048765ad 3568
95e8e59e
AD
3569 /* check if we need to reset the asic
3570 * E.g., driver was not cleanly unloaded previously, etc.
3571 */
f14899fd 3572 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
e3c1b071 3573 if (adev->gmc.xgmi.num_physical_nodes) {
3574 dev_info(adev->dev, "Pending hive reset.\n");
3575 adev->gmc.xgmi.pending_reset = true;
3576 /* Only init the blocks the SMU needs to handle the reset */
3577 for (i = 0; i < adev->num_ip_blocks; i++) {
3578 if (!adev->ip_blocks[i].status.valid)
3579 continue;
3580 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3581 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3582 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3583 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
751f43e7 3584 DRM_DEBUG("IP %s disabled for hw_init.\n",
e3c1b071 3585 adev->ip_blocks[i].version->funcs->name);
3586 adev->ip_blocks[i].status.hw = true;
3587 }
3588 }
3589 } else {
3590 r = amdgpu_asic_reset(adev);
3591 if (r) {
3592 dev_err(adev->dev, "asic reset on init failed\n");
3593 goto failed;
3594 }
95e8e59e
AD
3595 }
3596 }
3597
8f66090b 3598 pci_enable_pcie_error_reporting(adev->pdev);
c9a6b82f 3599
d38ceaf9 3600 /* Post card if necessary */
39c640c0 3601 if (amdgpu_device_need_post(adev)) {
d38ceaf9 3602 if (!adev->bios) {
bec86378 3603 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
3604 r = -EINVAL;
3605 goto failed;
d38ceaf9 3606 }
bec86378 3607 DRM_INFO("GPU posting now...\n");
4d2997ab 3608 r = amdgpu_device_asic_init(adev);
4e99a44e
ML
3609 if (r) {
3610 dev_err(adev->dev, "gpu post error!\n");
3611 goto failed;
3612 }
d38ceaf9
AD
3613 }
3614
88b64e95
AD
3615 if (adev->is_atom_fw) {
3616 /* Initialize clocks */
3617 r = amdgpu_atomfirmware_get_clock_info(adev);
3618 if (r) {
3619 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 3620 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
3621 goto failed;
3622 }
3623 } else {
a5bde2f9
AD
3624 /* Initialize clocks */
3625 r = amdgpu_atombios_get_clock_info(adev);
3626 if (r) {
3627 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 3628 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 3629 goto failed;
a5bde2f9
AD
3630 }
3631 /* init i2c buses */
4562236b
HW
3632 if (!amdgpu_device_has_dc_support(adev))
3633 amdgpu_atombios_i2c_init(adev);
2c1a2784 3634 }
d38ceaf9 3635
bfca0289 3636fence_driver_init:
d38ceaf9 3637 /* Fence driver */
067f44c8 3638 r = amdgpu_fence_driver_sw_init(adev);
2c1a2784 3639 if (r) {
067f44c8 3640 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
e23b74aa 3641 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 3642 goto failed;
2c1a2784 3643 }
d38ceaf9
AD
3644
3645 /* init the mode config */
4a580877 3646 drm_mode_config_init(adev_to_drm(adev));
d38ceaf9 3647
06ec9070 3648 r = amdgpu_device_ip_init(adev);
d38ceaf9 3649 if (r) {
8840a387 3650 /* failed in exclusive mode due to timeout */
3651 if (amdgpu_sriov_vf(adev) &&
3652 !amdgpu_sriov_runtime(adev) &&
3653 amdgpu_virt_mmio_blocked(adev) &&
3654 !amdgpu_virt_wait_reset(adev)) {
3655 dev_err(adev->dev, "VF exclusive mode timeout\n");
1daee8b4
PD
3656 /* Don't send request since VF is inactive. */
3657 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3658 adev->virt.ops = NULL;
8840a387 3659 r = -EAGAIN;
970fd197 3660 goto release_ras_con;
8840a387 3661 }
06ec9070 3662 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
e23b74aa 3663 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
970fd197 3664 goto release_ras_con;
d38ceaf9
AD
3665 }
3666
8d35a259
LG
3667 amdgpu_fence_driver_hw_init(adev);
3668
d69b8971
YZ
3669 dev_info(adev->dev,
3670 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
d7f72fe4
YZ
3671 adev->gfx.config.max_shader_engines,
3672 adev->gfx.config.max_sh_per_se,
3673 adev->gfx.config.max_cu_per_sh,
3674 adev->gfx.cu_info.number);
3675
d38ceaf9
AD
3676 adev->accel_working = true;
3677
e59c0205
AX
3678 amdgpu_vm_check_compute_bug(adev);
3679
95844d20
MO
3680 /* Initialize the buffer migration limit. */
3681 if (amdgpu_moverate >= 0)
3682 max_MBps = amdgpu_moverate;
3683 else
3684 max_MBps = 8; /* Allow 8 MB/s. */
3685 /* Get a log2 for easy divisions. */
3686 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3687
d2f52ac8 3688 r = amdgpu_pm_sysfs_init(adev);
7c868b59
YT
3689 if (r) {
3690 adev->pm_sysfs_en = false;
d2f52ac8 3691 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
7c868b59
YT
3692 } else
3693 adev->pm_sysfs_en = true;
d2f52ac8 3694
5bb23532 3695 r = amdgpu_ucode_sysfs_init(adev);
7c868b59
YT
3696 if (r) {
3697 adev->ucode_sysfs_en = false;
5bb23532 3698 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
7c868b59
YT
3699 } else
3700 adev->ucode_sysfs_en = true;
5bb23532 3701
d38ceaf9
AD
3702 if ((amdgpu_testing & 1)) {
3703 if (adev->accel_working)
3704 amdgpu_test_moves(adev);
3705 else
3706 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3707 }
d38ceaf9
AD
3708 if (amdgpu_benchmarking) {
3709 if (adev->accel_working)
3710 amdgpu_benchmark(adev, amdgpu_benchmarking);
3711 else
3712 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3713 }
3714
b0adca4d
EQ
3715 /*
3716 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3717 * Otherwise the mgpu fan boost feature will be skipped because the
3718 * gpu instance count would be too low.
3719 */
3720 amdgpu_register_gpu_instance(adev);
3721
d38ceaf9
AD
3722 /* enable clockgating, etc. after IB tests, since some blocks require
3723 * explicit gating rather than handling it automatically.
3724 */
e3c1b071 3725 if (!adev->gmc.xgmi.pending_reset) {
3726 r = amdgpu_device_ip_late_init(adev);
3727 if (r) {
3728 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3729 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
970fd197 3730 goto release_ras_con;
e3c1b071 3731 }
3732 /* must succeed. */
3733 amdgpu_ras_resume(adev);
3734 queue_delayed_work(system_wq, &adev->delayed_init_work,
3735 msecs_to_jiffies(AMDGPU_RESUME_MS));
2c1a2784 3736 }
d38ceaf9 3737
2c738637
ML
3738 if (amdgpu_sriov_vf(adev))
3739 flush_delayed_work(&adev->delayed_init_work);
3740
77f3a5cd 3741 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
5aea5327 3742 if (r)
77f3a5cd 3743 dev_err(adev->dev, "Could not create amdgpu device attr\n");
bd607166 3744
d155bef0
AB
3745 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3746 r = amdgpu_pmu_init(adev);
9c7c85f7
JK
3747 if (r)
3748 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3749
c1dd4aa6
AG
3750 /* Have stored pci confspace at hand for restore in sudden PCI error */
3751 if (amdgpu_device_cache_pci_state(adev->pdev))
3752 pci_restore_state(pdev);
3753
8c3dd61c
KHF
3754 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3755 /* this will fail for cards that aren't VGA class devices, just
3756 * ignore it */
3757 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
bf44e8ce 3758 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
8c3dd61c
KHF
3759
3760 if (amdgpu_device_supports_px(ddev)) {
3761 px = true;
3762 vga_switcheroo_register_client(adev->pdev,
3763 &amdgpu_switcheroo_ops, px);
3764 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3765 }
3766
e3c1b071 3767 if (adev->gmc.xgmi.pending_reset)
3768 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3769 msecs_to_jiffies(AMDGPU_RESUME_MS));
3770
d38ceaf9 3771 return 0;
83ba126a 3772
970fd197
SY
3773release_ras_con:
3774 amdgpu_release_ras_context(adev);
3775
83ba126a 3776failed:
89041940 3777 amdgpu_vf_error_trans_all(adev);
8840a387 3778
83ba126a 3779 return r;
d38ceaf9
AD
3780}
3781
07775fc1
AG
3782static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3783{
3784 /* Clear all CPU mappings pointing to this device */
3785 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3786
3787 /* Unmap all mapped bars - Doorbell, registers and VRAM */
3788 amdgpu_device_doorbell_fini(adev);
3789
3790 iounmap(adev->rmmio);
3791 adev->rmmio = NULL;
3792 if (adev->mman.aper_base_kaddr)
3793 iounmap(adev->mman.aper_base_kaddr);
3794 adev->mman.aper_base_kaddr = NULL;
3795
3796 /* Memory manager related */
3797 if (!adev->gmc.xgmi.connected_to_cpu) {
3798 arch_phys_wc_del(adev->gmc.vram_mtrr);
3799 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3800 }
3801}
3802
d38ceaf9
AD
3803/**
3804 * amdgpu_device_fini_hw - tear down the driver (hardware phase)
3805 *
3806 * @adev: amdgpu_device pointer
3807 *
3808 * Tear down the driver info (all asics).
3809 * Called at driver shutdown.
3810 */
72c8c97b 3811void amdgpu_device_fini_hw(struct amdgpu_device *adev)
d38ceaf9 3812{
aac89168 3813 dev_info(adev->dev, "amdgpu: finishing device.\n");
9f875167 3814 flush_delayed_work(&adev->delayed_init_work);
691191a2
YW
3815 if (adev->mman.initialized) {
3816 flush_delayed_work(&adev->mman.bdev.wq);
e78b3197 3817 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
691191a2 3818 }
d0d13fe8 3819 adev->shutdown = true;
9f875167 3820
752c683d
ML
3821 /* make sure IB tests have finished before entering exclusive mode
3822 * to avoid preemption during the IB tests
3823 */
519b8b76 3824 if (amdgpu_sriov_vf(adev)) {
752c683d 3825 amdgpu_virt_request_full_gpu(adev, false);
519b8b76
BZ
3826 amdgpu_virt_fini_data_exchange(adev);
3827 }
752c683d 3828
e5b03032
ML
3829 /* disable all interrupts */
3830 amdgpu_irq_disable_all(adev);
ff97cba8 3831 if (adev->mode_info.mode_config_initialized) {
700de2c8 3832 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4a580877 3833 drm_helper_force_disable_all(adev_to_drm(adev));
ff97cba8 3834 else
4a580877 3835 drm_atomic_helper_shutdown(adev_to_drm(adev));
ff97cba8 3836 }
8d35a259 3837 amdgpu_fence_driver_hw_fini(adev);
72c8c97b 3838
7c868b59
YT
3839 if (adev->pm_sysfs_en)
3840 amdgpu_pm_sysfs_fini(adev);
72c8c97b
AG
3841 if (adev->ucode_sysfs_en)
3842 amdgpu_ucode_sysfs_fini(adev);
3843 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3844
232d1d43
SY
3845 /* RAS features must be disabled before hw fini */
3846 amdgpu_ras_pre_fini(adev);
3847
e9669fb7 3848 amdgpu_device_ip_fini_early(adev);
d10d0daa 3849
a3848df6
YW
3850 amdgpu_irq_fini_hw(adev);
3851
894c6890
AG
3852 ttm_device_clear_dma_mappings(&adev->mman.bdev);
3853
d10d0daa 3854 amdgpu_gart_dummy_page_fini(adev);
07775fc1
AG
3855
3856 amdgpu_device_unmap_mmio(adev);
72c8c97b
AG
3857}
3858
3859void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3860{
8d35a259 3861 amdgpu_fence_driver_sw_fini(adev);
a5c5d8d5 3862 amdgpu_device_ip_fini(adev);
75e1658e
ND
3863 release_firmware(adev->firmware.gpu_info_fw);
3864 adev->firmware.gpu_info_fw = NULL;
d38ceaf9 3865 adev->accel_working = false;
04442bf7
LL
3866
3867 amdgpu_reset_fini(adev);
3868
d38ceaf9 3869 /* free i2c buses */
4562236b
HW
3870 if (!amdgpu_device_has_dc_support(adev))
3871 amdgpu_i2c_fini(adev);
bfca0289
SL
3872
3873 if (amdgpu_emu_mode != 1)
3874 amdgpu_atombios_fini(adev);
3875
d38ceaf9
AD
3876 kfree(adev->bios);
3877 adev->bios = NULL;
b98c6299 3878 if (amdgpu_device_supports_px(adev_to_drm(adev))) {
84c8b22e 3879 vga_switcheroo_unregister_client(adev->pdev);
83ba126a 3880 vga_switcheroo_fini_domain_pm_ops(adev->dev);
b98c6299 3881 }
38d6be81 3882 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
b8779475 3883 vga_client_unregister(adev->pdev);
e9bc1bf7 3884
d155bef0
AB
3885 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3886 amdgpu_pmu_fini(adev);
72de33f8 3887 if (adev->mman.discovery_bin)
a190d1c7 3888 amdgpu_discovery_fini(adev);
72c8c97b
AG
3889
3890 kfree(adev->pci_state);
3891
d38ceaf9
AD
3892}
3893
58144d28
ND
3894/**
3895 * amdgpu_device_evict_resources - evict device resources
3896 * @adev: amdgpu device object
3897 *
3898 * Evicts all TTM device resources (VRAM BOs, GART table) from the LRU list
3899 * of the VRAM memory type. Mainly used for evicting device resources
3900 * at suspend time.
3901 *
3902 */
3903static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
3904{
3905 /* No need to evict vram on APUs for suspend to ram */
3906 if (adev->in_s3 && (adev->flags & AMD_IS_APU))
3907 return;
3908
3909 if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
3910 DRM_WARN("evicting device resources failed\n");
3911
3912}
d38ceaf9
AD
3913
3914/*
3915 * Suspend & resume.
3916 */
3917/**
810ddc3a 3918 * amdgpu_device_suspend - initiate device suspend
d38ceaf9 3919 *
87e3f136 3920 * @dev: drm dev pointer
87e3f136 3921 * @fbcon: notify the fbdev of suspend
d38ceaf9
AD
3922 *
3923 * Puts the hw in the suspend state (all asics).
3924 * Returns 0 for success or an error on failure.
3925 * Called at driver suspend.
3926 */
de185019 3927int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
d38ceaf9 3928{
a2e15b0e 3929 struct amdgpu_device *adev = drm_to_adev(dev);
d38ceaf9 3930
d38ceaf9
AD
3931 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3932 return 0;
3933
44779b43 3934 adev->in_suspend = true;
3fa8f89d
S
3935
3936 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
3937 DRM_WARN("smart shift update failed\n");
3938
d38ceaf9
AD
3939 drm_kms_helper_poll_disable(dev);
3940
5f818173 3941 if (fbcon)
087451f3 3942 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
5f818173 3943
beff74bc 3944 cancel_delayed_work_sync(&adev->delayed_init_work);
a5459475 3945
5e6932fe 3946 amdgpu_ras_suspend(adev);
3947
2196927b 3948 amdgpu_device_ip_suspend_phase1(adev);
fe1053b7 3949
5d3a2d95
AD
3950 if (!adev->in_s0ix)
3951 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
94fa5660 3952
58144d28
ND
3953 /* First evict vram memory */
3954 amdgpu_device_evict_resources(adev);
d38ceaf9 3955
8d35a259 3956 amdgpu_fence_driver_hw_fini(adev);
d38ceaf9 3957
2196927b 3958 amdgpu_device_ip_suspend_phase2(adev);
58144d28
ND
3959 /* This second call to evict device resources is to evict
3960 * the gart page table using the CPU.
a0a71e49 3961 */
58144d28 3962 amdgpu_device_evict_resources(adev);
d38ceaf9 3963
d38ceaf9
AD
3964 return 0;
3965}
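/*
 * Minimal sketch of a caller, loosely modeled on amdgpu_pmops_suspend()
 * in amdgpu_drv.c (an assumption about the surrounding driver code and
 * deliberately simplified, not a verbatim copy):
 *
 *	static int amdgpu_pmops_suspend(struct device *dev)
 *	{
 *		struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *		return amdgpu_device_suspend(drm_dev, true);
 *	}
 */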
3966
3967/**
810ddc3a 3968 * amdgpu_device_resume - initiate device resume
d38ceaf9 3969 *
87e3f136 3970 * @dev: drm dev pointer
87e3f136 3971 * @fbcon: notify the fbdev of resume
d38ceaf9
AD
3972 *
3973 * Bring the hw back to operating state (all asics).
3974 * Returns 0 for success or an error on failure.
3975 * Called at driver resume.
3976 */
de185019 3977int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
d38ceaf9 3978{
1348969a 3979 struct amdgpu_device *adev = drm_to_adev(dev);
03161a6e 3980 int r = 0;
d38ceaf9
AD
3981
3982 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3983 return 0;
3984
62498733 3985 if (adev->in_s0ix)
628c36d7
PL
3986 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3987
d38ceaf9 3988 /* post card */
39c640c0 3989 if (amdgpu_device_need_post(adev)) {
4d2997ab 3990 r = amdgpu_device_asic_init(adev);
74b0b157 3991 if (r)
aac89168 3992 dev_err(adev->dev, "amdgpu asic init failed\n");
74b0b157 3993 }
d38ceaf9 3994
06ec9070 3995 r = amdgpu_device_ip_resume(adev);
e6707218 3996 if (r) {
aac89168 3997 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4d3b9ae5 3998 return r;
e6707218 3999 }
8d35a259 4000 amdgpu_fence_driver_hw_init(adev);
5ceb54c6 4001
06ec9070 4002 r = amdgpu_device_ip_late_init(adev);
03161a6e 4003 if (r)
4d3b9ae5 4004 return r;
d38ceaf9 4005
beff74bc
AD
4006 queue_delayed_work(system_wq, &adev->delayed_init_work,
4007 msecs_to_jiffies(AMDGPU_RESUME_MS));
4008
5d3a2d95
AD
4009 if (!adev->in_s0ix) {
4010 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4011 if (r)
4012 return r;
4013 }
756e6880 4014
96a5d8d4 4015 /* Make sure IB tests flushed */
beff74bc 4016 flush_delayed_work(&adev->delayed_init_work);
96a5d8d4 4017
a2e15b0e 4018 if (fbcon)
087451f3 4019 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
d38ceaf9
AD
4020
4021 drm_kms_helper_poll_enable(dev);
23a1a9e5 4022
5e6932fe 4023 amdgpu_ras_resume(adev);
4024
23a1a9e5
L
4025 /*
4026 * Most of the connector probing functions try to acquire runtime pm
4027 * refs to ensure that the GPU is powered on when connector polling is
4028 * performed. Since we're calling this from a runtime PM callback,
4029 * trying to acquire rpm refs will cause us to deadlock.
4030 *
4031 * Since we're guaranteed to be holding the rpm lock, it's safe to
4032 * temporarily disable the rpm helpers so this doesn't deadlock us.
4033 */
4034#ifdef CONFIG_PM
4035 dev->dev->power.disable_depth++;
4036#endif
4562236b
HW
4037 if (!amdgpu_device_has_dc_support(adev))
4038 drm_helper_hpd_irq_event(dev);
4039 else
4040 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
4041#ifdef CONFIG_PM
4042 dev->dev->power.disable_depth--;
4043#endif
44779b43
RZ
4044 adev->in_suspend = false;
4045
3fa8f89d
S
4046 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4047 DRM_WARN("smart shift update failed\n");
4048
4d3b9ae5 4049 return 0;
d38ceaf9
AD
4050}
4051
e3ecdffa
AD
4052/**
4053 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4054 *
4055 * @adev: amdgpu_device pointer
4056 *
4057 * The list of all the hardware IPs that make up the asic is walked and
4058 * the check_soft_reset callbacks are run. check_soft_reset determines
4059 * if the asic is still hung or not.
4060 * Returns true if any of the IPs are still in a hung state, false if not.
4061 */
06ec9070 4062static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
63fbf42f
CZ
4063{
4064 int i;
4065 bool asic_hang = false;
4066
f993d628
ML
4067 if (amdgpu_sriov_vf(adev))
4068 return true;
4069
8bc04c29
AD
4070 if (amdgpu_asic_need_full_reset(adev))
4071 return true;
4072
63fbf42f 4073 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 4074 if (!adev->ip_blocks[i].status.valid)
63fbf42f 4075 continue;
a1255107
AD
4076 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4077 adev->ip_blocks[i].status.hang =
4078 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4079 if (adev->ip_blocks[i].status.hang) {
aac89168 4080 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
4081 asic_hang = true;
4082 }
4083 }
4084 return asic_hang;
4085}
4086
e3ecdffa
AD
4087/**
4088 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4089 *
4090 * @adev: amdgpu_device pointer
4091 *
4092 * The list of all the hardware IPs that make up the asic is walked and the
4093 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4094 * handles any IP specific hardware or software state changes that are
4095 * necessary for a soft reset to succeed.
4096 * Returns 0 on success, negative error code on failure.
4097 */
06ec9070 4098static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
4099{
4100 int i, r = 0;
4101
4102 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 4103 if (!adev->ip_blocks[i].status.valid)
d31a501e 4104 continue;
a1255107
AD
4105 if (adev->ip_blocks[i].status.hang &&
4106 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4107 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
4108 if (r)
4109 return r;
4110 }
4111 }
4112
4113 return 0;
4114}
4115
e3ecdffa
AD
4116/**
4117 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4118 *
4119 * @adev: amdgpu_device pointer
4120 *
4121 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4122 * reset is necessary to recover.
4123 * Returns true if a full asic reset is required, false if not.
4124 */
06ec9070 4125static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
35d782fe 4126{
da146d3b
AD
4127 int i;
4128
8bc04c29
AD
4129 if (amdgpu_asic_need_full_reset(adev))
4130 return true;
4131
da146d3b 4132 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 4133 if (!adev->ip_blocks[i].status.valid)
da146d3b 4134 continue;
a1255107
AD
4135 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4136 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4137 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
4138 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4139 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 4140 if (adev->ip_blocks[i].status.hang) {
aac89168 4141 dev_info(adev->dev, "Some block need full reset!\n");
da146d3b
AD
4142 return true;
4143 }
4144 }
35d782fe
CZ
4145 }
4146 return false;
4147}
4148
e3ecdffa
AD
4149/**
4150 * amdgpu_device_ip_soft_reset - do a soft reset
4151 *
4152 * @adev: amdgpu_device pointer
4153 *
4154 * The list of all the hardware IPs that make up the asic is walked and the
4155 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4156 * IP specific hardware or software state changes that are necessary to soft
4157 * reset the IP.
4158 * Returns 0 on success, negative error code on failure.
4159 */
06ec9070 4160static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
4161{
4162 int i, r = 0;
4163
4164 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 4165 if (!adev->ip_blocks[i].status.valid)
35d782fe 4166 continue;
a1255107
AD
4167 if (adev->ip_blocks[i].status.hang &&
4168 adev->ip_blocks[i].version->funcs->soft_reset) {
4169 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
4170 if (r)
4171 return r;
4172 }
4173 }
4174
4175 return 0;
4176}
4177
e3ecdffa
AD
4178/**
4179 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4180 *
4181 * @adev: amdgpu_device pointer
4182 *
4183 * The list of all the hardware IPs that make up the asic is walked and the
4184 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4185 * handles any IP specific hardware or software state changes that are
4186 * necessary after the IP has been soft reset.
4187 * Returns 0 on success, negative error code on failure.
4188 */
06ec9070 4189static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
4190{
4191 int i, r = 0;
4192
4193 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 4194 if (!adev->ip_blocks[i].status.valid)
35d782fe 4195 continue;
a1255107
AD
4196 if (adev->ip_blocks[i].status.hang &&
4197 adev->ip_blocks[i].version->funcs->post_soft_reset)
4198 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
4199 if (r)
4200 return r;
4201 }
4202
4203 return 0;
4204}
4205
e3ecdffa 4206/**
c33adbc7 4207 * amdgpu_device_recover_vram - Recover some VRAM contents
e3ecdffa
AD
4208 *
4209 * @adev: amdgpu_device pointer
4210 *
4211 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4212 * restore things like GPUVM page tables after a GPU reset where
4213 * the contents of VRAM might be lost.
403009bf
CK
4214 *
4215 * Returns:
4216 * 0 on success, negative error code on failure.
e3ecdffa 4217 */
c33adbc7 4218static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
c41d1cf6 4219{
c41d1cf6 4220 struct dma_fence *fence = NULL, *next = NULL;
403009bf 4221 struct amdgpu_bo *shadow;
e18aaea7 4222 struct amdgpu_bo_vm *vmbo;
403009bf 4223 long r = 1, tmo;
c41d1cf6
ML
4224
4225 if (amdgpu_sriov_runtime(adev))
b045d3af 4226 tmo = msecs_to_jiffies(8000);
c41d1cf6
ML
4227 else
4228 tmo = msecs_to_jiffies(100);
4229
aac89168 4230 dev_info(adev->dev, "recover vram bo from shadow start\n");
c41d1cf6 4231 mutex_lock(&adev->shadow_list_lock);
e18aaea7
ND
4232 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4233 shadow = &vmbo->bo;
403009bf 4234 /* No need to recover an evicted BO */
d3116756
CK
4235 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4236 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4237 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
403009bf
CK
4238 continue;
4239
4240 r = amdgpu_bo_restore_shadow(shadow, &next);
4241 if (r)
4242 break;
4243
c41d1cf6 4244 if (fence) {
1712fb1a 4245 tmo = dma_fence_wait_timeout(fence, false, tmo);
403009bf
CK
4246 dma_fence_put(fence);
4247 fence = next;
1712fb1a 4248 if (tmo == 0) {
4249 r = -ETIMEDOUT;
c41d1cf6 4250 break;
1712fb1a 4251 } else if (tmo < 0) {
4252 r = tmo;
4253 break;
4254 }
403009bf
CK
4255 } else {
4256 fence = next;
c41d1cf6 4257 }
c41d1cf6
ML
4258 }
4259 mutex_unlock(&adev->shadow_list_lock);
4260
403009bf
CK
4261 if (fence)
4262 tmo = dma_fence_wait_timeout(fence, false, tmo);
c41d1cf6
ML
4263 dma_fence_put(fence);
4264
1712fb1a 4265 if (r < 0 || tmo <= 0) {
aac89168 4266 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
403009bf
CK
4267 return -EIO;
4268 }
c41d1cf6 4269
aac89168 4270 dev_info(adev->dev, "recover vram bo from shadow done\n");
403009bf 4271 return 0;
c41d1cf6
ML
4272}
4273
a90ad3c2 4274
e3ecdffa 4275/**
06ec9070 4276 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5740682e 4277 *
982a820b 4278 * @adev: amdgpu_device pointer
87e3f136 4279 * @from_hypervisor: request from hypervisor
5740682e
ML
4280 *
4281 * do a VF FLR and reinitialize the ASIC
3f48c681 4282 * Returns 0 on success, a negative error code on failure.
e3ecdffa
AD
4283 */
4284static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4285 bool from_hypervisor)
5740682e
ML
4286{
4287 int r;
4288
992110d7 4289 amdgpu_amdkfd_pre_reset(adev);
4290
5740682e
ML
4291 if (from_hypervisor)
4292 r = amdgpu_virt_request_full_gpu(adev, true);
4293 else
4294 r = amdgpu_virt_reset_gpu(adev);
4295 if (r)
4296 return r;
a90ad3c2
ML
4297
4298 /* Resume IP prior to SMC */
06ec9070 4299 r = amdgpu_device_ip_reinit_early_sriov(adev);
5740682e
ML
4300 if (r)
4301 goto error;
a90ad3c2 4302
c9ffa427 4303 amdgpu_virt_init_data_exchange(adev);
a90ad3c2 4304 /* we need to recover the gart before resuming SMC/CP/SDMA */
6c28aed6 4305 amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
a90ad3c2 4306
7a3e0bb2
RZ
4307 r = amdgpu_device_fw_loading(adev);
4308 if (r)
4309 return r;
4310
a90ad3c2 4311 /* now we are okay to resume SMC/CP/SDMA */
06ec9070 4312 r = amdgpu_device_ip_reinit_late_sriov(adev);
5740682e
ML
4313 if (r)
4314 goto error;
a90ad3c2
ML
4315
4316 amdgpu_irq_gpu_reset_resume_helper(adev);
5740682e 4317 r = amdgpu_ib_ring_tests(adev);
992110d7 4318 amdgpu_amdkfd_post_reset(adev);
a90ad3c2 4319
abc34253 4320error:
c41d1cf6 4321 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
e3526257 4322 amdgpu_inc_vram_lost(adev);
c33adbc7 4323 r = amdgpu_device_recover_vram(adev);
a90ad3c2 4324 }
437f3e0b 4325 amdgpu_virt_release_full_gpu(adev, true);
a90ad3c2
ML
4326
4327 return r;
4328}
4329
9a1cddd6 4330/**
4331 * amdgpu_device_has_job_running - check if there is any job in the pending list
4332 *
982a820b 4333 * @adev: amdgpu_device pointer
9a1cddd6 4334 *
4335 * check if there is any job in the pending list
4336 */
4337bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4338{
4339 int i;
4340 struct drm_sched_job *job;
4341
4342 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4343 struct amdgpu_ring *ring = adev->rings[i];
4344
4345 if (!ring || !ring->sched.thread)
4346 continue;
4347
4348 spin_lock(&ring->sched.job_list_lock);
6efa4b46
LT
4349 job = list_first_entry_or_null(&ring->sched.pending_list,
4350 struct drm_sched_job, list);
9a1cddd6 4351 spin_unlock(&ring->sched.job_list_lock);
4352 if (job)
4353 return true;
4354 }
4355 return false;
4356}
4357
12938fad
CK
4358/**
4359 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4360 *
982a820b 4361 * @adev: amdgpu_device pointer
12938fad
CK
4362 *
4363 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4364 * a hung GPU.
4365 */
4366bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4367{
4368 if (!amdgpu_device_ip_check_soft_reset(adev)) {
aac89168 4369 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
12938fad
CK
4370 return false;
4371 }
4372
3ba7b418
AG
4373 if (amdgpu_gpu_recovery == 0)
4374 goto disabled;
4375
4376 if (amdgpu_sriov_vf(adev))
4377 return true;
4378
4379 if (amdgpu_gpu_recovery == -1) {
4380 switch (adev->asic_type) {
fc42d47c
AG
4381 case CHIP_BONAIRE:
4382 case CHIP_HAWAII:
3ba7b418
AG
4383 case CHIP_TOPAZ:
4384 case CHIP_TONGA:
4385 case CHIP_FIJI:
4386 case CHIP_POLARIS10:
4387 case CHIP_POLARIS11:
4388 case CHIP_POLARIS12:
4389 case CHIP_VEGAM:
4390 case CHIP_VEGA20:
4391 case CHIP_VEGA10:
4392 case CHIP_VEGA12:
c43b849f 4393 case CHIP_RAVEN:
e9d4cf91 4394 case CHIP_ARCTURUS:
2cb44fb0 4395 case CHIP_RENOIR:
658c6639
AD
4396 case CHIP_NAVI10:
4397 case CHIP_NAVI14:
4398 case CHIP_NAVI12:
131a3c74 4399 case CHIP_SIENNA_CICHLID:
665fe4dc 4400 case CHIP_NAVY_FLOUNDER:
27859ee3 4401 case CHIP_DIMGREY_CAVEFISH:
a2f55040 4402 case CHIP_BEIGE_GOBY:
fe68ceef 4403 case CHIP_VANGOGH:
ea4e96a7 4404 case CHIP_ALDEBARAN:
3ba7b418
AG
4405 break;
4406 default:
4407 goto disabled;
4408 }
12938fad
CK
4409 }
4410
4411 return true;
3ba7b418
AG
4412
4413disabled:
aac89168 4414 dev_info(adev->dev, "GPU recovery disabled.\n");
3ba7b418 4415 return false;
12938fad
CK
4416}
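/*
 * For reference, the amdgpu_gpu_recovery values used above (summarized
 * editorially from this file): 0 disables recovery, 1 enables it for
 * all ASICs, -1 (the default) enables it only for the ASICs listed
 * above, and 2 additionally rechecks guilty jobs after reset, see
 * amdgpu_device_recheck_guilty_jobs().
 */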
4417
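/*
 * Editorial summary for the function below (not original kernel-doc),
 * derived from its body:
 *
 * amdgpu_device_mode1_reset - perform a whole-ASIC mode1 reset
 * @adev: amdgpu_device pointer
 *
 * Disables bus mastering, caches the PCI config space, triggers a mode1
 * reset through the SMU if supported or through the PSP otherwise, then
 * polls the memsize register until the ASIC comes back.
 * Returns 0 on success, negative error code on failure.
 */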
5c03e584
FX
4418int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4419{
4420 u32 i;
4421 int ret = 0;
4422
4423 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4424
4425 dev_info(adev->dev, "GPU mode1 reset\n");
4426
4427 /* disable BM */
4428 pci_clear_master(adev->pdev);
4429
4430 amdgpu_device_cache_pci_state(adev->pdev);
4431
4432 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4433 dev_info(adev->dev, "GPU smu mode1 reset\n");
4434 ret = amdgpu_dpm_mode1_reset(adev);
4435 } else {
4436 dev_info(adev->dev, "GPU psp mode1 reset\n");
4437 ret = psp_gpu_reset(adev);
4438 }
4439
4440 if (ret)
4441 dev_err(adev->dev, "GPU mode1 reset failed\n");
4442
4443 amdgpu_device_load_pci_state(adev->pdev);
4444
4445 /* wait for asic to come out of reset */
4446 for (i = 0; i < adev->usec_timeout; i++) {
4447 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4448
4449 if (memsize != 0xffffffff)
4450 break;
4451 udelay(1);
4452 }
4453
4454 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4455 return ret;
4456}
5c6dd71e 4457
e3c1b071 4458int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
04442bf7 4459 struct amdgpu_reset_context *reset_context)
26bc5340 4460{
c530b02f 4461 int i, j, r = 0;
04442bf7
LL
4462 struct amdgpu_job *job = NULL;
4463 bool need_full_reset =
4464 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4465
4466 if (reset_context->reset_req_dev == adev)
4467 job = reset_context->job;
71182665 4468
b602ca5f
TZ
4469 if (amdgpu_sriov_vf(adev)) {
4470 /* stop the data exchange thread */
4471 amdgpu_virt_fini_data_exchange(adev);
4472 }
4473
71182665 4474 /* block all schedulers and reset given job's ring */
0875dc9e
CZ
4475 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4476 struct amdgpu_ring *ring = adev->rings[i];
4477
51687759 4478 if (!ring || !ring->sched.thread)
0875dc9e 4479 continue;
5740682e 4480
c530b02f
JZ
4481 /* Clear the job fences from the fence driver to avoid force_completion;
4482 * leave the NULL and VM flush fences in the fence driver. */
4483 for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
4484 struct dma_fence *old, **ptr;
4485
4486 ptr = &ring->fence_drv.fences[j];
4487 old = rcu_dereference_protected(*ptr, 1);
4488 if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
4489 RCU_INIT_POINTER(*ptr, NULL);
4490 }
4491 }
2f9d4084
ML
4492 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4493 amdgpu_fence_driver_force_completion(ring);
0875dc9e 4494 }
d38ceaf9 4495
ff99849b 4496 if (job && job->vm)
222b5f04
AG
4497 drm_sched_increase_karma(&job->base);
4498
04442bf7 4499 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
404b277b
LL
4500 /* If reset handler not implemented, continue; otherwise return */
4501 if (r == -ENOSYS)
4502 r = 0;
4503 else
04442bf7
LL
4504 return r;
4505
1d721ed6 4506 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
26bc5340
AG
4507 if (!amdgpu_sriov_vf(adev)) {
4508
4509 if (!need_full_reset)
4510 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4511
4512 if (!need_full_reset) {
4513 amdgpu_device_ip_pre_soft_reset(adev);
4514 r = amdgpu_device_ip_soft_reset(adev);
4515 amdgpu_device_ip_post_soft_reset(adev);
4516 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
aac89168 4517 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
26bc5340
AG
4518 need_full_reset = true;
4519 }
4520 }
4521
4522 if (need_full_reset)
4523 r = amdgpu_device_ip_suspend(adev);
04442bf7
LL
4524 if (need_full_reset)
4525 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4526 else
4527 clear_bit(AMDGPU_NEED_FULL_RESET,
4528 &reset_context->flags);
26bc5340
AG
4529 }
4530
4531 return r;
4532}
4533
04442bf7
LL
4534int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4535 struct amdgpu_reset_context *reset_context)
26bc5340
AG
4536{
4537 struct amdgpu_device *tmp_adev = NULL;
04442bf7 4538 bool need_full_reset, skip_hw_reset, vram_lost = false;
26bc5340
AG
4539 int r = 0;
4540
04442bf7
LL
4541 /* Try reset handler method first */
4542 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4543 reset_list);
4544 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
404b277b
LL
4545 /* If reset handler not implemented, continue; otherwise return */
4546 if (r == -ENOSYS)
4547 r = 0;
4548 else
04442bf7
LL
4549 return r;
4550
4551 /* Reset handler not implemented, use the default method */
4552 need_full_reset =
4553 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4554 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4555
26bc5340 4556 /*
655ce9cb 4557 * ASIC reset has to be done on all XGMI hive nodes ASAP
26bc5340
AG
4558 * to allow proper link negotiation in FW (within 1 sec)
4559 */
7ac71382 4560 if (!skip_hw_reset && need_full_reset) {
655ce9cb 4561 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
041a62bc 4562 /* For XGMI run all resets in parallel to speed up the process */
d4535e2c 4563 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
e3c1b071 4564 tmp_adev->gmc.xgmi.pending_reset = false;
c96cf282 4565 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
d4535e2c
AG
4566 r = -EALREADY;
4567 } else
4568 r = amdgpu_asic_reset(tmp_adev);
d4535e2c 4569
041a62bc 4570 if (r) {
aac89168 4571 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4a580877 4572 r, adev_to_drm(tmp_adev)->unique);
041a62bc 4573 break;
ce316fa5
LM
4574 }
4575 }
4576
041a62bc
AG
4577 /* For XGMI wait for all resets to complete before proceed */
4578 if (!r) {
655ce9cb 4579 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
ce316fa5
LM
4580 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4581 flush_work(&tmp_adev->xgmi_reset_work);
4582 r = tmp_adev->asic_reset_res;
4583 if (r)
4584 break;
ce316fa5
LM
4585 }
4586 }
4587 }
ce316fa5 4588 }
26bc5340 4589
43c4d576 4590 if (!r && amdgpu_ras_intr_triggered()) {
655ce9cb 4591 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
8bc7b360
HZ
4592 if (tmp_adev->mmhub.ras_funcs &&
4593 tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
4594 tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
43c4d576
JC
4595 }
4596
00eaa571 4597 amdgpu_ras_intr_cleared();
43c4d576 4598 }
00eaa571 4599
655ce9cb 4600 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
26bc5340
AG
4601 if (need_full_reset) {
4602 /* post card */
e3c1b071 4603 r = amdgpu_device_asic_init(tmp_adev);
4604 if (r) {
aac89168 4605 dev_warn(tmp_adev->dev, "asic atom init failed!");
e3c1b071 4606 } else {
26bc5340 4607 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
9cec53c1
JZ
4608 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4609 if (r)
4610 goto out;
4611
26bc5340
AG
4612 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4613 if (r)
4614 goto out;
4615
4616 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4617 if (vram_lost) {
77e7f829 4618 DRM_INFO("VRAM is lost due to GPU reset!\n");
e3526257 4619 amdgpu_inc_vram_lost(tmp_adev);
26bc5340
AG
4620 }
4621
6c28aed6 4622 r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
26bc5340
AG
4623 if (r)
4624 goto out;
4625
4626 r = amdgpu_device_fw_loading(tmp_adev);
4627 if (r)
4628 return r;
4629
4630 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4631 if (r)
4632 goto out;
4633
4634 if (vram_lost)
4635 amdgpu_device_fill_reset_magic(tmp_adev);
4636
fdafb359
EQ
4637 /*
4638 * Add this ASIC as tracked, since the reset has already
4639 * completed successfully.
4640 */
4641 amdgpu_register_gpu_instance(tmp_adev);
4642
04442bf7
LL
4643 if (!reset_context->hive &&
4644 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
e3c1b071 4645 amdgpu_xgmi_add_device(tmp_adev);
4646
7c04ca50 4647 r = amdgpu_device_ip_late_init(tmp_adev);
4648 if (r)
4649 goto out;
4650
087451f3 4651 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
565d1941 4652
e8fbaf03
GC
4653 /*
4654 * The GPU enters a bad state once the number of faulty
4655 * pages found by ECC reaches the threshold, and RAS
4656 * recovery is scheduled next. So add a check
4657 * here to abort recovery if the bad page threshold
4658 * is indeed exceeded, and remind the user to
4659 * retire this GPU or set a bigger
4660 * bad_page_threshold value to fix this when
4661 * probing the driver again.
4662 */
11003c68 4663 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
e8fbaf03
GC
4664 /* must succeed. */
4665 amdgpu_ras_resume(tmp_adev);
4666 } else {
4667 r = -EINVAL;
4668 goto out;
4669 }
e79a04d5 4670
26bc5340 4671 /* Update PSP FW topology after reset */
04442bf7
LL
4672 if (reset_context->hive &&
4673 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4674 r = amdgpu_xgmi_update_topology(
4675 reset_context->hive, tmp_adev);
26bc5340
AG
4676 }
4677 }
4678
26bc5340
AG
4679out:
4680 if (!r) {
4681 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4682 r = amdgpu_ib_ring_tests(tmp_adev);
4683 if (r) {
4684 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
26bc5340
AG
4685 need_full_reset = true;
4686 r = -EAGAIN;
4687 goto end;
4688 }
4689 }
4690
4691 if (!r)
4692 r = amdgpu_device_recover_vram(tmp_adev);
4693 else
4694 tmp_adev->asic_reset_res = r;
4695 }
4696
4697end:
04442bf7
LL
4698 if (need_full_reset)
4699 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4700 else
4701 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
26bc5340
AG
4702 return r;
4703}
4704
08ebb485
DL
4705static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4706 struct amdgpu_hive_info *hive)
26bc5340 4707{
53b3f8f4
DL
4708 if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4709 return false;
4710
08ebb485
DL
4711 if (hive) {
4712 down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4713 } else {
4714 down_write(&adev->reset_sem);
4715 }
5740682e 4716
a3a09142
AD
4717 switch (amdgpu_asic_reset_method(adev)) {
4718 case AMD_RESET_METHOD_MODE1:
4719 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4720 break;
4721 case AMD_RESET_METHOD_MODE2:
4722 adev->mp1_state = PP_MP1_STATE_RESET;
4723 break;
4724 default:
4725 adev->mp1_state = PP_MP1_STATE_NONE;
4726 break;
4727 }
1d721ed6
AG
4728
4729 return true;
26bc5340 4730}
d38ceaf9 4731
26bc5340
AG
4732static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4733{
89041940 4734 amdgpu_vf_error_trans_all(adev);
a3a09142 4735 adev->mp1_state = PP_MP1_STATE_NONE;
53b3f8f4 4736 atomic_set(&adev->in_gpu_reset, 0);
6049db43 4737 up_write(&adev->reset_sem);
26bc5340
AG
4738}
4739
91fb309d
HC
4740/*
4741 * Lock a list of amdgpu devices in a hive safely. If it is not a hive
4742 * with multiple nodes, this behaves like amdgpu_device_lock_adev.
4743 *
4744 * Unlocking does not require a rollback.
4745 */
4746static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
4747{
4748 struct amdgpu_device *tmp_adev = NULL;
4749
4750 if (adev->gmc.xgmi.num_physical_nodes > 1) {
4751 if (!hive) {
4752 dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
4753 return -ENODEV;
4754 }
4755 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4756 if (!amdgpu_device_lock_adev(tmp_adev, hive))
4757 goto roll_back;
4758 }
4759 } else if (!amdgpu_device_lock_adev(adev, hive))
4760 return -EAGAIN;
4761
4762 return 0;
4763roll_back:
4764 if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
4765 /*
4766 * If the locking iteration broke off in the middle of a hive,
4767 * there may be a race issue,
4768 * or a hive device may have locked up independently.
4769 * We may or may not be in trouble, so roll back
4770 * the locks taken so far and print a warning.
4771 */
4772 dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
4773 list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4774 amdgpu_device_unlock_adev(tmp_adev);
4775 }
4776 }
4777 return -EAGAIN;
4778}
4779
3f12acc8
EQ
4780static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4781{
4782 struct pci_dev *p = NULL;
4783
4784 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4785 adev->pdev->bus->number, 1);
4786 if (p) {
4787 pm_runtime_enable(&(p->dev));
4788 pm_runtime_resume(&(p->dev));
4789 }
4790}
4791
4792static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4793{
4794 enum amd_reset_method reset_method;
4795 struct pci_dev *p = NULL;
4796 u64 expires;
4797
4798 /*
4799 * For now, only BACO and mode1 reset are confirmed
4800 * to suffer the audio issue without proper suspended.
4801 */
4802 reset_method = amdgpu_asic_reset_method(adev);
4803 if ((reset_method != AMD_RESET_METHOD_BACO) &&
4804 (reset_method != AMD_RESET_METHOD_MODE1))
4805 return -EINVAL;
4806
4807 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4808 adev->pdev->bus->number, 1);
4809 if (!p)
4810 return -ENODEV;
4811
4812 expires = pm_runtime_autosuspend_expiration(&(p->dev));
4813 if (!expires)
4814 /*
4815 * If we cannot get the audio device autosuspend delay,
4816 * a fixed 4s interval will be used. Since 3s is
4817 * the audio controller's default autosuspend delay,
4818 * the 4s used here is guaranteed to cover it.
4819 */
54b7feb9 4820 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
3f12acc8
EQ
4821
4822 while (!pm_runtime_status_suspended(&(p->dev))) {
4823 if (!pm_runtime_suspend(&(p->dev)))
4824 break;
4825
4826 if (expires < ktime_get_mono_fast_ns()) {
4827 dev_warn(adev->dev, "failed to suspend display audio\n");
4828 /* TODO: abort the succeeding gpu reset? */
4829 return -ETIMEDOUT;
4830 }
4831 }
4832
4833 pm_runtime_disable(&(p->dev));
4834
4835 return 0;
4836}
4837
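/*
 * Editorial summary of the function below, derived from its body: for
 * each ring, resubmit only the first pending job and wait for it with
 * the ring's timeout. On timeout that job is the real guilty one and a
 * HW reset is performed; otherwise its finished fence is signaled and
 * the job is freed.
 */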
9d8d96be 4838static void amdgpu_device_recheck_guilty_jobs(
04442bf7
LL
4839 struct amdgpu_device *adev, struct list_head *device_list_handle,
4840 struct amdgpu_reset_context *reset_context)
e6c6338f
JZ
4841{
4842 int i, r = 0;
4843
4844 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4845 struct amdgpu_ring *ring = adev->rings[i];
4846 int ret = 0;
4847 struct drm_sched_job *s_job;
4848
4849 if (!ring || !ring->sched.thread)
4850 continue;
4851
4852 s_job = list_first_entry_or_null(&ring->sched.pending_list,
4853 struct drm_sched_job, list);
4854 if (s_job == NULL)
4855 continue;
4856
4857 /* clear the job's guilty flag and rely on the following step to decide the real one */
4858 drm_sched_reset_karma(s_job);
38d4e463
JC
4859 /* the real bad job will be resubmitted twice, so take a dma_fence_get
4860 * here to keep the fence refcount balanced */
4861 dma_fence_get(s_job->s_fence->parent);
e6c6338f
JZ
4862 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
4863
4864 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
4865 if (ret == 0) { /* timeout */
4866 DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
4867 ring->sched.name, s_job->id);
4868
4869 /* set guilty */
4870 drm_sched_increase_karma(s_job);
4871retry:
4872 /* do hw reset */
4873 if (amdgpu_sriov_vf(adev)) {
4874 amdgpu_virt_fini_data_exchange(adev);
4875 r = amdgpu_device_reset_sriov(adev, false);
4876 if (r)
4877 adev->asic_reset_res = r;
4878 } else {
04442bf7
LL
4879 clear_bit(AMDGPU_SKIP_HW_RESET,
4880 &reset_context->flags);
4881 r = amdgpu_do_asic_reset(device_list_handle,
4882 reset_context);
e6c6338f
JZ
4883 if (r && r == -EAGAIN)
4884 goto retry;
4885 }
4886
4887 /*
4888 * add reset counter so that the following
4889 * resubmitted job could flush vmid
4890 */
4891 atomic_inc(&adev->gpu_reset_counter);
4892 continue;
4893 }
4894
4895 /* got the hw fence, signal finished fence */
4896 atomic_dec(ring->sched.score);
38d4e463 4897 dma_fence_put(s_job->s_fence->parent);
e6c6338f
JZ
4898 dma_fence_get(&s_job->s_fence->finished);
4899 dma_fence_signal(&s_job->s_fence->finished);
4900 dma_fence_put(&s_job->s_fence->finished);
4901
4902 /* remove node from list and free the job */
4903 spin_lock(&ring->sched.job_list_lock);
4904 list_del_init(&s_job->list);
4905 spin_unlock(&ring->sched.job_list_lock);
4906 ring->sched.ops->free_job(s_job);
4907 }
4908}
4909
26bc5340
AG
4910/**
4911 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4912 *
982a820b 4913 * @adev: amdgpu_device pointer
26bc5340
AG
4914 * @job: which job trigger hang
4915 *
4916 * Attempt to reset the GPU if it has hung (all asics).
4917 * Attempt to do soft-reset or full-reset and reinitialize Asic
4918 * Returns 0 for success or an error on failure.
4919 */
4920
4921int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4922 struct amdgpu_job *job)
4923{
1d721ed6 4924 struct list_head device_list, *device_list_handle = NULL;
7dd8c205 4925 bool job_signaled = false;
26bc5340 4926 struct amdgpu_hive_info *hive = NULL;
26bc5340 4927 struct amdgpu_device *tmp_adev = NULL;
1d721ed6 4928 int i, r = 0;
bb5c7235 4929 bool need_emergency_restart = false;
3f12acc8 4930 bool audio_suspended = false;
e6c6338f 4931 int tmp_vram_lost_counter;
04442bf7
LL
4932 struct amdgpu_reset_context reset_context;
4933
4934 memset(&reset_context, 0, sizeof(reset_context));
26bc5340 4935
6e3cd2a9 4936 /*
bb5c7235
WS
4937 * Special case: RAS triggered and full reset isn't supported
4938 */
4939 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4940
d5ea093e
AG
4941 /*
4942 * Flush RAM to disk so that after reboot
4943 * the user can read log and see why the system rebooted.
4944 */
bb5c7235 4945 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
d5ea093e
AG
4946 DRM_WARN("Emergency reboot.");
4947
4948 ksys_sync_helper();
4949 emergency_restart();
4950 }
4951
b823821f 4952 dev_info(adev->dev, "GPU %s begin!\n",
bb5c7235 4953 need_emergency_restart ? "jobs stop":"reset");
26bc5340
AG
4954
4955 /*
1d721ed6
AG
4956 * Here we trylock to avoid chain of resets executing from
4957 * either trigger by jobs on different adevs in XGMI hive or jobs on
4958 * different schedulers for same device while this TO handler is running.
4959 * We always reset all schedulers for device and all devices for XGMI
4960 * hive so that should take care of them too.
26bc5340 4961 */
d95e8e97 4962 hive = amdgpu_get_xgmi_hive(adev);
53b3f8f4
DL
4963 if (hive) {
4964 if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
4965 DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4966 job ? job->base.id : -1, hive->hive_id);
d95e8e97 4967 amdgpu_put_xgmi_hive(hive);
ff99849b 4968 if (job && job->vm)
91fb309d 4969 drm_sched_increase_karma(&job->base);
53b3f8f4
DL
4970 return 0;
4971 }
4972 mutex_lock(&hive->hive_lock);
1d721ed6 4973 }
26bc5340 4974
04442bf7
LL
4975 reset_context.method = AMD_RESET_METHOD_NONE;
4976 reset_context.reset_req_dev = adev;
4977 reset_context.job = job;
4978 reset_context.hive = hive;
4979 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
4980
91fb309d
HC
4981 /*
4982 * lock the device before we try to operate on the linked list;
4983 * if we didn't get the device lock, don't touch the linked list since
4984 * others may be iterating over it.
4985 */
4986 r = amdgpu_device_lock_hive_adev(adev, hive);
4987 if (r) {
4988 dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
4989 job ? job->base.id : -1);
4990
4991 /* even though we skipped this reset, we still need to mark the job guilty */
ff99849b 4992 if (job && job->vm)
91fb309d
HC
4993 drm_sched_increase_karma(&job->base);
4994 goto skip_recovery;
4995 }
4996
9e94d22c
EQ
4997 /*
4998 * Build list of devices to reset.
4999 * In case we are in XGMI hive mode, resort the device list
5000 * to put adev in the 1st position.
5001 */
5002 INIT_LIST_HEAD(&device_list);
5003 if (adev->gmc.xgmi.num_physical_nodes > 1) {
655ce9cb 5004 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
5005 list_add_tail(&tmp_adev->reset_list, &device_list);
5006 if (!list_is_first(&adev->reset_list, &device_list))
5007 list_rotate_to_front(&adev->reset_list, &device_list);
5008 device_list_handle = &device_list;
26bc5340 5009 } else {
655ce9cb 5010 list_add_tail(&adev->reset_list, &device_list);
26bc5340
AG
5011 device_list_handle = &device_list;
5012 }
5013
1d721ed6 5014 /* block all schedulers and reset given job's ring */
655ce9cb 5015 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
3f12acc8
EQ
5016 /*
5017 * Try to put the audio codec into suspend state
5018 * before the gpu reset starts.
5019 *
5020 * The power domain of the graphics device
5021 * is shared with the AZ power domain. Without this,
5022 * we may change the audio hardware from behind
5023 * the audio driver's back. That would trigger
5024 * some audio codec errors.
5025 */
5026 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5027 audio_suspended = true;
5028
9e94d22c
EQ
5029 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5030
52fb44cf
EQ
5031 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5032
992110d7 5033 if (!amdgpu_sriov_vf(tmp_adev))
5034 amdgpu_amdkfd_pre_reset(tmp_adev);
9e94d22c 5035
12ffa55d
AG
5036 /*
5037 * Mark these ASICs to be reset as untracked first,
5038 * and add them back after the reset completes.
5039 */
5040 amdgpu_unregister_gpu_instance(tmp_adev);
5041
087451f3 5042 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
565d1941 5043
f1c1314b 5044 /* disable ras on ALL IPs */
bb5c7235 5045 if (!need_emergency_restart &&
b823821f 5046 amdgpu_device_ip_need_full_reset(tmp_adev))
f1c1314b 5047 amdgpu_ras_suspend(tmp_adev);
5048
1d721ed6
AG
5049 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5050 struct amdgpu_ring *ring = tmp_adev->rings[i];
5051
5052 if (!ring || !ring->sched.thread)
5053 continue;
5054
0b2d2c2e 5055 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
7c6e68c7 5056
bb5c7235 5057 if (need_emergency_restart)
7c6e68c7 5058 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
1d721ed6 5059 }
8f8c80f4 5060 atomic_inc(&tmp_adev->gpu_reset_counter);
1d721ed6
AG
5061 }
5062
bb5c7235 5063 if (need_emergency_restart)
7c6e68c7
AG
5064 goto skip_sched_resume;
5065
1d721ed6
AG
5066 /*
5067 * Must check whether the guilty job has already signaled, since after
5068 * this point all old HW fences are force signaled.
5069 *
5070 * job->base holds a reference to the parent fence.
5071 */
5072 if (job && job->base.s_fence->parent &&
7dd8c205 5073 dma_fence_is_signaled(job->base.s_fence->parent)) {
1d721ed6 5074 job_signaled = true;
1d721ed6
AG
5075 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5076 goto skip_hw_reset;
5077 }
5078
26bc5340 5079retry: /* Rest of adevs pre asic reset from XGMI hive. */
655ce9cb 5080 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
04442bf7 5081 r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
26bc5340
AG
5082 /* TODO: should we stop? */
5083 if (r) {
aac89168 5084 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
4a580877 5085 r, adev_to_drm(tmp_adev)->unique);
26bc5340
AG
5086 tmp_adev->asic_reset_res = r;
5087 }
5088 }
5089
e6c6338f 5090 tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
26bc5340 5091 /* Actual ASIC resets if needed. */
4f30d920 5092 /* Host driver will handle XGMI hive reset for SRIOV */
26bc5340
AG
5093 if (amdgpu_sriov_vf(adev)) {
5094 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5095 if (r)
5096 adev->asic_reset_res = r;
5097 } else {
04442bf7 5098 r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
26bc5340
AG
5099 if (r && r == -EAGAIN)
5100 goto retry;
5101 }
5102
1d721ed6
AG
5103skip_hw_reset:
5104
26bc5340 5105 /* Post ASIC reset for all devs. */
655ce9cb 5106 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
7c6e68c7 5107
e6c6338f
JZ
5108 /*
5109 * Sometimes a later bad compute job can block a good gfx job, since the
5110 * gfx and compute rings share internal GC HW. We add an additional
5111 * guilty-job recheck step to find the real guilty job: it synchronously
5112 * resubmits each job and waits for it to signal. If the wait times out,
5113 * we identify that job as the real guilty one.
5114 */
5115 if (amdgpu_gpu_recovery == 2 &&
5116 !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
04442bf7
LL
5117 amdgpu_device_recheck_guilty_jobs(
5118 tmp_adev, device_list_handle, &reset_context);
e6c6338f 5119
1d721ed6
AG
5120 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5121 struct amdgpu_ring *ring = tmp_adev->rings[i];
5122
5123 if (!ring || !ring->sched.thread)
5124 continue;
5125
5126 /* No point in resubmitting jobs if we didn't do a HW reset */
5127 if (!tmp_adev->asic_reset_res && !job_signaled)
5128 drm_sched_resubmit_jobs(&ring->sched);
5129
5130 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5131 }
5132
700de2c8 5133 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
4a580877 5134 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
1d721ed6
AG
5135 }
5136
5137 tmp_adev->asic_reset_res = 0;
26bc5340
AG
5138
5139 if (r) {
5140 /* bad news, how to tell it to userspace ? */
12ffa55d 5141 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
26bc5340
AG
5142 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5143 } else {
12ffa55d 5144 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
3fa8f89d
S
5145 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5146 DRM_WARN("smart shift update failed\n");
26bc5340 5147 }
7c6e68c7 5148 }
26bc5340 5149
7c6e68c7 5150skip_sched_resume:
655ce9cb 5151 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
992110d7 5152 /* unlock kfd: SRIOV would do it separately */
5153 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5154 amdgpu_amdkfd_post_reset(tmp_adev);
8e2712e7 5155
5156 /* kfd_post_reset will do nothing if the kfd device is not initialized;
5157 * bring up kfd here if it wasn't initialized before.
5158 */
5159 if (!tmp_adev->kfd.init_complete)
5160 amdgpu_amdkfd_device_init(tmp_adev);
5161
3f12acc8
EQ
5162 if (audio_suspended)
5163 amdgpu_device_resume_display_audio(tmp_adev);
26bc5340
AG
5164 amdgpu_device_unlock_adev(tmp_adev);
5165 }
5166
cbfd17f7 5167skip_recovery:
9e94d22c 5168 if (hive) {
53b3f8f4 5169 atomic_set(&hive->in_reset, 0);
9e94d22c 5170 mutex_unlock(&hive->hive_lock);
d95e8e97 5171 amdgpu_put_xgmi_hive(hive);
9e94d22c 5172 }
26bc5340 5173
91fb309d 5174 if (r && r != -EAGAIN)
26bc5340 5175 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
d38ceaf9
AD
5176 return r;
5177}
5178
e3ecdffa
AD
5179/**
5180 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5181 *
5182 * @adev: amdgpu_device pointer
5183 *
5184 * Fetches and stores in the driver the PCIE capabilities (gen speed
5185 * and lanes) of the slot the device is in. Handles APUs and
5186 * virtualized environments where PCIE config space may not be available.
5187 */
5494d864 5188static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
d0dd7f0c 5189{
5d9a6330 5190 struct pci_dev *pdev;
c5313457
HK
5191 enum pci_bus_speed speed_cap, platform_speed_cap;
5192 enum pcie_link_width platform_link_width;
d0dd7f0c 5193
cd474ba0
AD
5194 if (amdgpu_pcie_gen_cap)
5195 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 5196
cd474ba0
AD
5197 if (amdgpu_pcie_lane_cap)
5198 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 5199
cd474ba0
AD
5200 /* covers APUs as well */
5201 if (pci_is_root_bus(adev->pdev->bus)) {
5202 if (adev->pm.pcie_gen_mask == 0)
5203 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5204 if (adev->pm.pcie_mlw_mask == 0)
5205 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 5206 return;
cd474ba0 5207 }
d0dd7f0c 5208
c5313457
HK
5209 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5210 return;
5211
dbaa922b
AD
5212 pcie_bandwidth_available(adev->pdev, NULL,
5213 &platform_speed_cap, &platform_link_width);
c5313457 5214
cd474ba0 5215 if (adev->pm.pcie_gen_mask == 0) {
5d9a6330
AD
5216 /* asic caps */
5217 pdev = adev->pdev;
5218 speed_cap = pcie_get_speed_cap(pdev);
5219 if (speed_cap == PCI_SPEED_UNKNOWN) {
5220 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
cd474ba0
AD
5221 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5222 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
cd474ba0 5223 } else {
2b3a1f51
FX
5224 if (speed_cap == PCIE_SPEED_32_0GT)
5225 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5226 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5227 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5228 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5229 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5230 else if (speed_cap == PCIE_SPEED_16_0GT)
5d9a6330
AD
5231 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5232 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5233 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5234 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5235 else if (speed_cap == PCIE_SPEED_8_0GT)
5236 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5237 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5238 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5239 else if (speed_cap == PCIE_SPEED_5_0GT)
5240 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5241 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5242 else
5243 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5244 }
5245 /* platform caps */
c5313457 5246 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5d9a6330
AD
5247 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5248 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5249 } else {
2b3a1f51
FX
5250 if (platform_speed_cap == PCIE_SPEED_32_0GT)
5251 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5252 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5253 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5254 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5255 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5256 else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5d9a6330
AD
5257 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5258 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5259 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5260 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
c5313457 5261 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5d9a6330
AD
5262 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5263 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5264 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
c5313457 5265 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5d9a6330
AD
5266 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5267 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5268 else
5269 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5270
cd474ba0
AD
5271 }
5272 }
5273 if (adev->pm.pcie_mlw_mask == 0) {
c5313457 5274 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5d9a6330
AD
5275 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5276 } else {
c5313457 5277 switch (platform_link_width) {
5d9a6330 5278 case PCIE_LNK_X32:
cd474ba0
AD
5279 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5280 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5281 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5282 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5283 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5284 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5285 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5286 break;
5d9a6330 5287 case PCIE_LNK_X16:
cd474ba0
AD
5288 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5289 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5290 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5291 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5292 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5293 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5294 break;
5d9a6330 5295 case PCIE_LNK_X12:
cd474ba0
AD
5296 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5297 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5298 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5299 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5300 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5301 break;
5d9a6330 5302 case PCIE_LNK_X8:
cd474ba0
AD
5303 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5304 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5305 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5306 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5307 break;
5d9a6330 5308 case PCIE_LNK_X4:
cd474ba0
AD
5309 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5310 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5311 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5312 break;
5d9a6330 5313 case PCIE_LNK_X2:
cd474ba0
AD
5314 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5315 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5316 break;
5d9a6330 5317 case PCIE_LNK_X1:
cd474ba0
AD
5318 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5319 break;
5320 default:
5321 break;
5322 }
d0dd7f0c
AD
5323 }
5324 }
5325}
d38ceaf9 5326
361dbd01
AD
5327int amdgpu_device_baco_enter(struct drm_device *dev)
5328{
1348969a 5329 struct amdgpu_device *adev = drm_to_adev(dev);
7a22677b 5330 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
361dbd01 5331
4a580877 5332 if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
361dbd01
AD
5333 return -ENOTSUPP;
5334
8ab0d6f0 5335 if (ras && adev->ras_enabled &&
acdae216 5336 adev->nbio.funcs->enable_doorbell_interrupt)
7a22677b
LM
5337 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5338
9530273e 5339 return amdgpu_dpm_baco_enter(adev);
361dbd01
AD
5340}
5341
5342int amdgpu_device_baco_exit(struct drm_device *dev)
5343{
1348969a 5344 struct amdgpu_device *adev = drm_to_adev(dev);
7a22677b 5345 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
9530273e 5346 int ret = 0;
361dbd01 5347
4a580877 5348 if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
361dbd01
AD
5349 return -ENOTSUPP;
5350
9530273e
EQ
5351 ret = amdgpu_dpm_baco_exit(adev);
5352 if (ret)
5353 return ret;
7a22677b 5354
8ab0d6f0 5355 if (ras && adev->ras_enabled &&
acdae216 5356 adev->nbio.funcs->enable_doorbell_interrupt)
7a22677b
LM
5357 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5358
1bece222
CL
5359 if (amdgpu_passthrough(adev) &&
5360 adev->nbio.funcs->clear_doorbell_interrupt)
5361 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5362
7a22677b 5363 return 0;
361dbd01 5364}
c9a6b82f 5365
acd89fca
AG
5366static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
5367{
5368 int i;
5369
5370 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5371 struct amdgpu_ring *ring = adev->rings[i];
5372
5373 if (!ring || !ring->sched.thread)
5374 continue;
5375
5376 cancel_delayed_work_sync(&ring->sched.work_tdr);
5377 }
5378}
5379
c9a6b82f
AG
5380/**
5381 * amdgpu_pci_error_detected - Called when a PCI error is detected.
5382 * @pdev: PCI device struct
5383 * @state: PCI channel state
5384 *
5385 * Description: Called when a PCI error is detected.
5386 *
5387 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5388 */
5389pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5390{
5391 struct drm_device *dev = pci_get_drvdata(pdev);
5392 struct amdgpu_device *adev = drm_to_adev(dev);
acd89fca 5393 int i;
c9a6b82f
AG
5394
5395 DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5396
6894305c
AG
5397 if (adev->gmc.xgmi.num_physical_nodes > 1) {
5398 DRM_WARN("No support for XGMI hive yet...");
5399 return PCI_ERS_RESULT_DISCONNECT;
5400 }
5401
e17e27f9
GC
5402 adev->pci_channel_state = state;
5403
c9a6b82f
AG
5404 switch (state) {
5405 case pci_channel_io_normal:
5406 return PCI_ERS_RESULT_CAN_RECOVER;
acd89fca 5407 /* Fatal error, prepare for slot reset */
8a11d283
TZ
5408 case pci_channel_io_frozen:
5409 /*
acd89fca
AG
5410 * Cancel and wait for all TDRs in progress if we fail to
5411 * set adev->in_gpu_reset in amdgpu_device_lock_adev.
5412 *
5413 * Locking adev->reset_sem will prevent any external access
5414 * to GPU during PCI error recovery
5415 */
5416 while (!amdgpu_device_lock_adev(adev, NULL))
5417 amdgpu_cancel_all_tdr(adev);
5418
5419 /*
5420 * Block any work scheduling as we do for regular GPU reset
5421 * for the duration of the recovery
5422 */
5423 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5424 struct amdgpu_ring *ring = adev->rings[i];
5425
5426 if (!ring || !ring->sched.thread)
5427 continue;
5428
5429 drm_sched_stop(&ring->sched, NULL);
5430 }
8f8c80f4 5431 atomic_inc(&adev->gpu_reset_counter);
c9a6b82f
AG
5432 return PCI_ERS_RESULT_NEED_RESET;
5433 case pci_channel_io_perm_failure:
5434 /* Permanent error, prepare for device removal */
5435 return PCI_ERS_RESULT_DISCONNECT;
5436 }
5437
5438 return PCI_ERS_RESULT_NEED_RESET;
5439}
5440
5441/**
5442 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5443 * @pdev: pointer to PCI device
5444 */
5445pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5446{
5447
5448 DRM_INFO("PCI error: mmio enabled callback!!\n");
5449
5450 /* TODO - dump whatever for debugging purposes */
5451
5452 /* This is called only if amdgpu_pci_error_detected returns
5453 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5454 * works, so there is no need to reset the slot.
5455 */
5456
5457 return PCI_ERS_RESULT_RECOVERED;
5458}
5459
5460/**
5461 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5462 * @pdev: PCI device struct
5463 *
5464 * Description: This routine is called by the pci error recovery
5465 * code after the PCI slot has been reset, just before we
5466 * should resume normal operations.
5467 */
5468pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5469{
5470 struct drm_device *dev = pci_get_drvdata(pdev);
5471 struct amdgpu_device *adev = drm_to_adev(dev);
362c7b91 5472 int r, i;
04442bf7 5473 struct amdgpu_reset_context reset_context;
362c7b91 5474 u32 memsize;
7ac71382 5475 struct list_head device_list;
c9a6b82f
AG
5476
5477 DRM_INFO("PCI error: slot reset callback!!\n");
5478
04442bf7
LL
5479 memset(&reset_context, 0, sizeof(reset_context));
5480
7ac71382 5481 INIT_LIST_HEAD(&device_list);
655ce9cb 5482 list_add_tail(&adev->reset_list, &device_list);
7ac71382 5483
362c7b91
AG
5484 /* wait for asic to come out of reset */
5485 msleep(500);
5486
7ac71382 5487 /* Restore PCI confspace */
c1dd4aa6 5488 amdgpu_device_load_pci_state(pdev);
c9a6b82f 5489
362c7b91
AG
5490 /* confirm ASIC came out of reset */
5491 for (i = 0; i < adev->usec_timeout; i++) {
5492 memsize = amdgpu_asic_get_config_memsize(adev);
5493
5494 if (memsize != 0xffffffff)
5495 break;
5496 udelay(1);
5497 }
5498 if (memsize == 0xffffffff) {
5499 r = -ETIME;
5500 goto out;
5501 }
5502
04442bf7
LL
5503 reset_context.method = AMD_RESET_METHOD_NONE;
5504 reset_context.reset_req_dev = adev;
5505 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5506 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5507
7afefb81 5508 adev->no_hw_access = true;
04442bf7 5509 r = amdgpu_device_pre_asic_reset(adev, &reset_context);
7afefb81 5510 adev->no_hw_access = false;
c9a6b82f
AG
5511 if (r)
5512 goto out;
5513
04442bf7 5514 r = amdgpu_do_asic_reset(&device_list, &reset_context);
c9a6b82f
AG
5515
5516out:
c9a6b82f 5517 if (!r) {
c1dd4aa6
AG
5518 if (amdgpu_device_cache_pci_state(adev->pdev))
5519 pci_restore_state(adev->pdev);
5520
c9a6b82f
AG
5521 DRM_INFO("PCIe error recovery succeeded\n");
5522 } else {
5523 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5524 amdgpu_device_unlock_adev(adev);
5525 }
5526
5527 return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5528}
5529
5530/**
5531 * amdgpu_pci_resume() - resume normal ops after PCI reset
5532 * @pdev: pointer to PCI device
5533 *
5534 * Called when the error recovery driver tells us that it's
505199a3 5535 * OK to resume normal operation.
c9a6b82f
AG
5536 */
5537void amdgpu_pci_resume(struct pci_dev *pdev)
5538{
5539 struct drm_device *dev = pci_get_drvdata(pdev);
5540 struct amdgpu_device *adev = drm_to_adev(dev);
acd89fca 5541 int i;
c9a6b82f 5542
c9a6b82f
AG
5543
5544 DRM_INFO("PCI error: resume callback!!\n");
acd89fca 5545
e17e27f9
GC
5546 /* Only continue execution for the case of pci_channel_io_frozen */
5547 if (adev->pci_channel_state != pci_channel_io_frozen)
5548 return;
5549
acd89fca
AG
5550 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5551 struct amdgpu_ring *ring = adev->rings[i];
5552
5553 if (!ring || !ring->sched.thread)
5554 continue;
5555
5556
5557 drm_sched_resubmit_jobs(&ring->sched);
5558 drm_sched_start(&ring->sched, true);
5559 }
5560
5561 amdgpu_device_unlock_adev(adev);
c9a6b82f 5562}
c1dd4aa6
AG
5563
5564bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5565{
5566 struct drm_device *dev = pci_get_drvdata(pdev);
5567 struct amdgpu_device *adev = drm_to_adev(dev);
5568 int r;
5569
5570 r = pci_save_state(pdev);
5571 if (!r) {
5572 kfree(adev->pci_state);
5573
5574 adev->pci_state = pci_store_saved_state(pdev);
5575
5576 if (!adev->pci_state) {
5577 DRM_ERROR("Failed to store PCI saved state");
5578 return false;
5579 }
5580 } else {
5581 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5582 return false;
5583 }
5584
5585 return true;
5586}
5587
5588bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5589{
5590 struct drm_device *dev = pci_get_drvdata(pdev);
5591 struct amdgpu_device *adev = drm_to_adev(dev);
5592 int r;
5593
5594 if (!adev->pci_state)
5595 return false;
5596
5597 r = pci_load_saved_state(pdev, adev->pci_state);
5598
5599 if (!r) {
5600 pci_restore_state(pdev);
5601 } else {
5602 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5603 return false;
5604 }
5605
5606 return true;
5607}
5608
810085dd
EH
5609void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5610 struct amdgpu_ring *ring)
5611{
5612#ifdef CONFIG_X86_64
5613 if (adev->flags & AMD_IS_APU)
5614 return;
5615#endif
5616 if (adev->gmc.xgmi.connected_to_cpu)
5617 return;
5618
5619 if (ring && ring->funcs->emit_hdp_flush)
5620 amdgpu_ring_emit_hdp_flush(ring);
5621 else
5622 amdgpu_asic_flush_hdp(adev, ring);
5623}
c1dd4aa6 5624
810085dd
EH
5625void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5626 struct amdgpu_ring *ring)
5627{
5628#ifdef CONFIG_X86_64
5629 if (adev->flags & AMD_IS_APU)
5630 return;
5631#endif
5632 if (adev->gmc.xgmi.connected_to_cpu)
5633 return;
c1dd4aa6 5634
810085dd
EH
5635 amdgpu_asic_invalidate_hdp(adev, ring);
5636}