/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000
#define AMDGPU_MAX_RETRY_LIMIT		2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"ALDEBARAN",
	"NAVI10",
	"CYAN_SKILLFISH",
	"NAVI14",
	"NAVI12",
	"SIENNA_CICHLID",
	"NAVY_FLOUNDER",
	"VANGOGH",
	"DIMGREY_CAVEFISH",
	"BEIGE_GOBY",
	"YELLOW_CARP",
	"IP DISCOVERY",
	"LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
		amdgpu_device_get_pcie_replay_count, NULL);

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * DOC: product_name
 *
 * The amdgpu driver provides a sysfs API for reporting the product name
 * for the device.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_name(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->product_name);
}

static DEVICE_ATTR(product_name, S_IRUGO,
		amdgpu_device_get_product_name, NULL);

/**
 * DOC: product_number
 *
 * The amdgpu driver provides a sysfs API for reporting the part number
 * for the device.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_number(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->product_number);
}

static DEVICE_ATTR(product_number, S_IRUGO,
		amdgpu_device_get_product_number, NULL);

/**
 * DOC: serial_number
 *
 * The amdgpu driver provides a sysfs API for reporting the serial number
 * for the device.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_serial_number(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->serial);
}

static DEVICE_ATTR(serial_number, S_IRUGO,
		amdgpu_device_get_serial_number, NULL);

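/*
 * Illustrative sketch (not upstream code): how the FRU attributes above
 * would typically be registered with sysfs during device bring-up.  The
 * helper name is hypothetical; the real driver registers its attributes
 * elsewhere during device init.
 */
#if 0
static int amdgpu_example_create_fru_sysfs(struct amdgpu_device *adev)
{
	int r;

	r = device_create_file(adev->dev, &dev_attr_product_name);
	if (r)
		return r;
	r = device_create_file(adev->dev, &dev_attr_product_number);
	if (r)
		return r;
	return device_create_file(adev->dev, &dev_attr_serial_number);
}
#endif
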
/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise return false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * smart shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
	return (amdgpu_device_supports_boco(dev) &&
		amdgpu_acpi_is_power_shift_control_supported());
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size, the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}

	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}

/**
 * amdgpu_device_aper_access - access vram by vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size, the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * The return value means how many bytes have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			mb();
			memcpy_fromio(buf, addr, count);
		}
	}

	return count;
#else
	return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size, the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try using the VRAM aperture to access VRAM first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* fall back to MM_INDEX/MM_DATA for the rest of VRAM */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}

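/*
 * Illustrative sketch (not upstream code): reading the first page of
 * VRAM into a system-memory bounce buffer with the helper above.
 */
#if 0
static void amdgpu_example_peek_vram(struct amdgpu_device *adev)
{
	void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return;

	/* write == false: copy one page from VRAM offset 0 into buf */
	amdgpu_device_vram_access(adev, 0, buf, PAGE_SIZE, false);
	kfree(buf);
}
#endif
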
/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_domain->sem))
			up_read(&adev->reset_domain->sem);
		else
			lockdep_assert_held(&adev->reset_domain->sem);
	}
#endif
	return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/*
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

/**
 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 *
 * This function is invoked only for the debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
	} else if ((reg * 4) >= adev->rmmio_size) {
		adev->pcie_wreg(adev, reg * 4, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 pcie_index, u32 pcie_data,
				u32 reg_addr)
{
	unsigned long flags;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 pcie_index, u32 pcie_data,
				  u32 reg_addr)
{
	unsigned long flags;
	u64 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 pcie_index, u32 pcie_data,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 pcie_index, u32 pcie_data,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

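/*
 * Illustrative sketch (not upstream code): SOC code typically wraps the
 * helpers above with its NBIO PCIE index/data register pair and installs
 * the wrapper as adev->pcie_rreg (see nv.c/soc15.c for the real versions).
 */
#if 0
static u32 example_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	u32 address = adev->nbio.funcs->get_pcie_index_offset(adev);
	u32 data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, address, data, reg);
}
#endif
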
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	amdgpu_asic_pre_asic_init(adev);

	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
		return amdgpu_atomfirmware_asic_init(adev, true);
	else
		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

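/*
 * Illustrative sketch (not upstream code): golden register tables are
 * flat {offset, AND mask, OR mask} triplets.  The offsets and values
 * below are made up for the example.
 */
#if 0
static const u32 example_golden_settings[] = {
	/*  offset      AND mask    OR mask */
	0x000098f8, 0x00ff0000, 0x00580000, /* read-modify-write */
	0x00009834, 0xffffffff, 0x00000002, /* AND mask of all ones: full overwrite */
};

static void amdgpu_example_init_golden(struct amdgpu_device *adev)
{
	amdgpu_device_program_register_sequence(adev,
						example_golden_settings,
						ARRAY_SIZE(example_golden_settings));
}
#endif
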
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{

	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	amdgpu_asic_init_doorbell_index(adev);

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	if (adev->enable_mes) {
		adev->doorbell.num_doorbells =
			adev->doorbell.size / sizeof(u32);
	} else {
		adev->doorbell.num_doorbells =
			min_t(u32, adev->doorbell.size / sizeof(u32),
			      adev->doorbell_index.max_assignment + 1);
		if (adev->doorbell.num_doorbells == 0)
			return -EINVAL;

		/* For Vega, reserve and map two pages on doorbell BAR since SDMA
		 * paging queue doorbell uses the second page. The
		 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
		 * doorbells are in the first page. So with paging queue enabled,
		 * the max num_doorbells should be incremented by one page (0x400 in dword).
		 */
		if (adev->asic_type >= CHIP_VEGA10)
			adev->doorbell.num_doorbells += 0x400;
	}

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

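/*
 * Illustrative sketch (not upstream code): the writeback slot lifecycle.
 * Ring and fence code allocates a slot, lets the GPU write status dwords
 * into it, and returns it on teardown.
 */
#if 0
static int amdgpu_example_wb_usage(struct amdgpu_device *adev)
{
	u32 wb;
	int r;

	r = amdgpu_device_wb_get(adev, &wb);
	if (r)
		return r;

	/* CPU-visible slot; the GPU address is adev->wb.gpu_addr + wb * 4 */
	adev->wb.wb[wb] = 0;

	/* ... hand the slot to a ring as its rptr/fence writeback ... */

	amdgpu_device_wb_free(adev, wb);
	return 0;
}
#endif
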
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the size we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
			rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if need or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still need driver do vPost otherwise gpu hang, while
		 * those smc fw version above 22.15 doesn't have this flaw, so we force
		 * vpost executed for smc version below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset whole hive on init */
	if (adev->gmc.xgmi.pending_reset)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
 * @adev: amdgpu_device pointer
 *
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 *
 * Returns true if it should be used or false if not.
 */
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
	switch (amdgpu_aspm) {
	case -1:
		break;
	case 0:
		return false;
	case 1:
		return true;
	default:
		return false;
	}
	return pcie_aspm_enabled(adev->pdev);
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
						 bool state)
{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
	if (!(adev->flags & AMD_IS_APU) ||
	    adev->asic_type < CHIP_RAVEN)
		return 0;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		break;
	case CHIP_RENOIR:
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case CHIP_YELLOW_CARP:
		break;
	case CHIP_CYAN_SKILLFISH:
		if ((adev->pdev->device == 0x13FE) ||
		    (adev->pdev->device == 0x143F))
			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
		break;
	default:
		break;
	}

	return 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	if (amdgpu_sched_hw_submission < 2) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = 2;
	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
	}

	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
		amdgpu_reset_method = -1;
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_gmc_tmz_set(adev);

	return 0;
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		r = pci_enable_device(pdev);
		if (r)
			DRM_WARN("pci_enable_device failed (%d)\n", r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

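/*
 * Illustrative sketch (not upstream code): how a caller requests a
 * clockgating change through the helper above, here gating GFX clocks.
 */
#if 0
static void amdgpu_example_gate_gfx_clocks(struct amdgpu_device *adev)
{
	amdgpu_device_ip_set_clockgating_state(adev,
					       AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_GATE);
}
#endif
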
e3ecdffa
AD
1703/**
1704 * amdgpu_device_ip_get_clockgating_state - get the CG state
1705 *
1706 * @adev: amdgpu_device pointer
1707 * @flags: clockgating feature flags
1708 *
1709 * Walks the list of IPs on the device and updates the clockgating
1710 * flags for each IP.
1711 * Updates @flags with the feature flags for each hardware IP where
1712 * clockgating is enabled.
1713 */
2990a1fc 1714void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
25faeddc 1715 u64 *flags)
6cb2d4e4
HR
1716{
1717 int i;
1718
1719 for (i = 0; i < adev->num_ip_blocks; i++) {
1720 if (!adev->ip_blocks[i].status.valid)
1721 continue;
1722 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1723 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1724 }
1725}
1726
e3ecdffa
AD
1727/**
1728 * amdgpu_device_ip_wait_for_idle - wait for idle
1729 *
1730 * @adev: amdgpu_device pointer
1731 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1732 *
1733 * Waits for the request hardware IP to be idle.
1734 * Returns 0 for success or a negative error code on failure.
1735 */
2990a1fc
AD
1736int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1737 enum amd_ip_block_type block_type)
5dbbb60b
AD
1738{
1739 int i, r;
1740
1741 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1742 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1743 continue;
a1255107
AD
1744 if (adev->ip_blocks[i].version->type == block_type) {
1745 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
5dbbb60b
AD
1746 if (r)
1747 return r;
1748 break;
1749 }
1750 }
1751 return 0;
1752
1753}
1754
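/*
 * Example usage (illustrative sketch; the call site is hypothetical):
 * a reset or suspend path could quiesce one engine type before touching
 * its registers.
 */
static int example_quiesce_sdma(struct amdgpu_device *adev)
{
	/* Blocks until the SDMA IP reports idle, or returns an error. */
	return amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_SDMA);
}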
e3ecdffa
AD
1755/**
1756 * amdgpu_device_ip_is_idle - is the hardware IP idle
1757 *
1758 * @adev: amdgpu_device pointer
1759 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1760 *
1761 * Check if the hardware IP is idle or not.
1762 * Returns true if the IP is idle, false if not.
1763 */
2990a1fc
AD
1764bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1765 enum amd_ip_block_type block_type)
5dbbb60b
AD
1766{
1767 int i;
1768
1769 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1770 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1771 continue;
a1255107
AD
1772 if (adev->ip_blocks[i].version->type == block_type)
1773 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
5dbbb60b
AD
1774 }
1775 return true;
1776
1777}
1778
e3ecdffa
AD
1779/**
1780 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1781 *
1782 * @adev: amdgpu_device pointer
87e3f136 1783 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
e3ecdffa
AD
1784 *
1785 * Returns a pointer to the hardware IP block structure
1786 * if it exists for the asic, otherwise NULL.
1787 */
2990a1fc
AD
1788struct amdgpu_ip_block *
1789amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1790 enum amd_ip_block_type type)
d38ceaf9
AD
1791{
1792 int i;
1793
1794 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 1795 if (adev->ip_blocks[i].version->type == type)
d38ceaf9
AD
1796 return &adev->ip_blocks[i];
1797
1798 return NULL;
1799}
1800
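/*
 * Example usage (illustrative sketch; the logging is an assumption):
 * looking up one IP block and inspecting its version structure.
 */
static void example_print_psp_version(struct amdgpu_device *adev)
{
	struct amdgpu_ip_block *block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);

	if (block)
		dev_info(adev->dev, "PSP IP v%u.%u.%u\n",
			 block->version->major, block->version->minor,
			 block->version->rev);
}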
1801/**
2990a1fc 1802 * amdgpu_device_ip_block_version_cmp
d38ceaf9
AD
1803 *
1804 * @adev: amdgpu_device pointer
5fc3aeeb 1805 * @type: enum amd_ip_block_type
d38ceaf9
AD
1806 * @major: major version
1807 * @minor: minor version
1808 *
1809 * Returns 0 if the installed version is equal to or greater than @major.@minor,
1810 * or 1 if it is smaller or the ip_block doesn't exist.
1811 */
2990a1fc
AD
1812int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1813 enum amd_ip_block_type type,
1814 u32 major, u32 minor)
d38ceaf9 1815{
2990a1fc 1816 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
d38ceaf9 1817
a1255107
AD
1818 if (ip_block && ((ip_block->version->major > major) ||
1819 ((ip_block->version->major == major) &&
1820 (ip_block->version->minor >= minor))))
d38ceaf9
AD
1821 return 0;
1822
1823 return 1;
1824}
1825
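/*
 * Example usage (illustrative sketch): gating a code path on a minimum
 * IP version. Note the inverted sense of the return value: 0 means
 * "installed version is at least the one requested".
 */
static bool example_gfx_is_v8_1_or_newer(struct amdgpu_device *adev)
{
	return amdgpu_device_ip_block_version_cmp(adev,
						  AMD_IP_BLOCK_TYPE_GFX,
						  8, 1) == 0;
}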
a1255107 1826/**
2990a1fc 1827 * amdgpu_device_ip_block_add
a1255107
AD
1828 *
1829 * @adev: amdgpu_device pointer
1830 * @ip_block_version: pointer to the IP to add
1831 *
1832 * Adds the IP block driver information to the collection of IPs
1833 * on the asic.
1834 */
2990a1fc
AD
1835int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1836 const struct amdgpu_ip_block_version *ip_block_version)
a1255107
AD
1837{
1838 if (!ip_block_version)
1839 return -EINVAL;
1840
7bd939d0
LG
1841 switch (ip_block_version->type) {
1842 case AMD_IP_BLOCK_TYPE_VCN:
1843 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1844 return 0;
1845 break;
1846 case AMD_IP_BLOCK_TYPE_JPEG:
1847 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1848 return 0;
1849 break;
1850 default:
1851 break;
1852 }
1853
e966a725 1854 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
a0bae357
HR
1855 ip_block_version->funcs->name);
1856
a1255107
AD
1857 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1858
1859 return 0;
1860}
1861
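/*
 * Example usage (illustrative sketch; the exact block list depends on the
 * asic and lives in the per-SoC files such as vi.c): asic setup code
 * registers its IP blocks in initialization order with this helper.
 */
static int example_register_blocks(struct amdgpu_device *adev)
{
	int r;

	/* Order matters: common/GMC/IH style blocks come before the rest. */
	r = amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
	if (r)
		return r;
	return 0;
}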
e3ecdffa
AD
1862/**
1863 * amdgpu_device_enable_virtual_display - enable virtual display feature
1864 *
1865 * @adev: amdgpu_device pointer
1866 *
1867 * Enables the virtual display feature if the user has enabled it via
1868 * the module parameter virtual_display. This feature provides a virtual
1869 * display hardware on headless boards or in virtualized environments.
1870 * This function parses and validates the configuration string specified by
1871 * the user and configures the virtual display configuration (number of
1872 * virtual connectors, crtcs, etc.) specified.
1873 */
483ef985 1874static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
9accf2fd
ED
1875{
1876 adev->enable_virtual_display = false;
1877
1878 if (amdgpu_virtual_display) {
8f66090b 1879 const char *pci_address_name = pci_name(adev->pdev);
0f66356d 1880 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
9accf2fd
ED
1881
1882 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1883 pciaddstr_tmp = pciaddstr;
0f66356d
ED
1884 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1885 pciaddname = strsep(&pciaddname_tmp, ",");
967de2a9
YT
1886 if (!strcmp("all", pciaddname)
1887 || !strcmp(pci_address_name, pciaddname)) {
0f66356d
ED
1888 long num_crtc;
1889 int res = -1;
1890
9accf2fd 1891 adev->enable_virtual_display = true;
0f66356d
ED
1892
1893 if (pciaddname_tmp)
1894 res = kstrtol(pciaddname_tmp, 10,
1895 &num_crtc);
1896
1897 if (!res) {
1898 if (num_crtc < 1)
1899 num_crtc = 1;
1900 if (num_crtc > 6)
1901 num_crtc = 6;
1902 adev->mode_info.num_crtc = num_crtc;
1903 } else {
1904 adev->mode_info.num_crtc = 1;
1905 }
9accf2fd
ED
1906 break;
1907 }
1908 }
1909
0f66356d
ED
1910 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1911 amdgpu_virtual_display, pci_address_name,
1912 adev->enable_virtual_display, adev->mode_info.num_crtc);
9accf2fd
ED
1913
1914 kfree(pciaddstr);
1915 }
1916}
1917
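/*
 * Note on the format parsed above: the module parameter is a
 * semicolon-separated list of <pci-bus-id>[,<num_crtc>] entries, where
 * "all" matches every device and num_crtc is clamped to the range 1-6,
 * defaulting to 1. For example, on the kernel command line:
 *
 *   amdgpu.virtual_display=0000:01:00.0,2
 *
 * enables virtual display with two CRTCs on that device only.
 */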
e3ecdffa
AD
1918/**
1919 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1920 *
1921 * @adev: amdgpu_device pointer
1922 *
1923 * Parses the asic configuration parameters specified in the gpu info
1924 * firmware and makes them available to the driver for use in configuring
1925 * the asic.
1926 * Returns 0 on success, -EINVAL on failure.
1927 */
e2a75f88
AD
1928static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1929{
e2a75f88 1930 const char *chip_name;
c0a43457 1931 char fw_name[40];
e2a75f88
AD
1932 int err;
1933 const struct gpu_info_firmware_header_v1_0 *hdr;
1934
ab4fe3e1
HR
1935 adev->firmware.gpu_info_fw = NULL;
1936
72de33f8 1937 if (adev->mman.discovery_bin) {
cc375d8c
TY
1938 /*
1939 * FIXME: The bounding box is still needed by Navi12, so
e24d0e91 1940 * temporarily read it from gpu_info firmware. Should be dropped
cc375d8c
TY
1941 * when DAL no longer needs it.
1942 */
1943 if (adev->asic_type != CHIP_NAVI12)
1944 return 0;
258620d0
AD
1945 }
1946
e2a75f88 1947 switch (adev->asic_type) {
e2a75f88
AD
1948#ifdef CONFIG_DRM_AMDGPU_SI
1949 case CHIP_VERDE:
1950 case CHIP_TAHITI:
1951 case CHIP_PITCAIRN:
1952 case CHIP_OLAND:
1953 case CHIP_HAINAN:
1954#endif
1955#ifdef CONFIG_DRM_AMDGPU_CIK
1956 case CHIP_BONAIRE:
1957 case CHIP_HAWAII:
1958 case CHIP_KAVERI:
1959 case CHIP_KABINI:
1960 case CHIP_MULLINS:
1961#endif
da87c30b
AD
1962 case CHIP_TOPAZ:
1963 case CHIP_TONGA:
1964 case CHIP_FIJI:
1965 case CHIP_POLARIS10:
1966 case CHIP_POLARIS11:
1967 case CHIP_POLARIS12:
1968 case CHIP_VEGAM:
1969 case CHIP_CARRIZO:
1970 case CHIP_STONEY:
27c0bc71 1971 case CHIP_VEGA20:
44b3253a 1972 case CHIP_ALDEBARAN:
84d244a3
JC
1973 case CHIP_SIENNA_CICHLID:
1974 case CHIP_NAVY_FLOUNDER:
eac88a5f 1975 case CHIP_DIMGREY_CAVEFISH:
0e5f4b09 1976 case CHIP_BEIGE_GOBY:
e2a75f88
AD
1977 default:
1978 return 0;
1979 case CHIP_VEGA10:
1980 chip_name = "vega10";
1981 break;
3f76dced
AD
1982 case CHIP_VEGA12:
1983 chip_name = "vega12";
1984 break;
2d2e5e7e 1985 case CHIP_RAVEN:
54f78a76 1986 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
54c4d17e 1987 chip_name = "raven2";
54f78a76 1988 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
741deade 1989 chip_name = "picasso";
54c4d17e
FX
1990 else
1991 chip_name = "raven";
2d2e5e7e 1992 break;
65e60f6e
LM
1993 case CHIP_ARCTURUS:
1994 chip_name = "arcturus";
1995 break;
42b325e5
XY
1996 case CHIP_NAVI12:
1997 chip_name = "navi12";
1998 break;
e2a75f88
AD
1999 }
2000
2001 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
ab4fe3e1 2002 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
e2a75f88
AD
2003 if (err) {
2004 dev_err(adev->dev,
2005 "Failed to load gpu_info firmware \"%s\"\n",
2006 fw_name);
2007 goto out;
2008 }
ab4fe3e1 2009 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
e2a75f88
AD
2010 if (err) {
2011 dev_err(adev->dev,
2012 "Failed to validate gpu_info firmware \"%s\"\n",
2013 fw_name);
2014 goto out;
2015 }
2016
ab4fe3e1 2017 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
e2a75f88
AD
2018 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2019
2020 switch (hdr->version_major) {
2021 case 1:
2022 {
2023 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
ab4fe3e1 2024 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
e2a75f88
AD
2025 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2026
cc375d8c
TY
2027 /*
2028 * Should be dropped when DAL no longer needs it.
2029 */
2030 if (adev->asic_type == CHIP_NAVI12)
ec51d3fa
XY
2031 goto parse_soc_bounding_box;
2032
b5ab16bf
AD
2033 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2034 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2035 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2036 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
e2a75f88 2037 adev->gfx.config.max_texture_channel_caches =
b5ab16bf
AD
2038 le32_to_cpu(gpu_info_fw->gc_num_tccs);
2039 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2040 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2041 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2042 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
e2a75f88 2043 adev->gfx.config.double_offchip_lds_buf =
b5ab16bf
AD
2044 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2045 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
51fd0370
HZ
2046 adev->gfx.cu_info.max_waves_per_simd =
2047 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2048 adev->gfx.cu_info.max_scratch_slots_per_cu =
2049 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2050 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
48321c3d 2051 if (hdr->version_minor >= 1) {
35c2e910
HZ
2052 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2053 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2054 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2055 adev->gfx.config.num_sc_per_sh =
2056 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2057 adev->gfx.config.num_packer_per_sc =
2058 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2059 }
ec51d3fa
XY
2060
2061parse_soc_bounding_box:
ec51d3fa
XY
2062 /*
2063 * soc bounding box info is not integrated in the discovery table, so
258620d0 2064 * we always need to parse it from the gpu info firmware when needed.
ec51d3fa 2065 */
48321c3d
HW
2066 if (hdr->version_minor == 2) {
2067 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2068 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2069 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2070 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2071 }
e2a75f88
AD
2072 break;
2073 }
2074 default:
2075 dev_err(adev->dev,
2076 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2077 err = -EINVAL;
2078 goto out;
2079 }
2080out:
e2a75f88
AD
2081 return err;
2082}
2083
e3ecdffa
AD
2084/**
2085 * amdgpu_device_ip_early_init - run early init for hardware IPs
2086 *
2087 * @adev: amdgpu_device pointer
2088 *
2089 * Early initialization pass for hardware IPs. The hardware IPs that make
2090 * up each asic are discovered and each IP's early_init callback is run. This
2091 * is the first stage in initializing the asic.
2092 * Returns 0 on success, negative error code on failure.
2093 */
06ec9070 2094static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
d38ceaf9 2095{
901e2be2
AD
2096 struct drm_device *dev = adev_to_drm(adev);
2097 struct pci_dev *parent;
aaa36a97 2098 int i, r;
d38ceaf9 2099
483ef985 2100 amdgpu_device_enable_virtual_display(adev);
a6be7570 2101
00a979f3 2102 if (amdgpu_sriov_vf(adev)) {
00a979f3 2103 r = amdgpu_virt_request_full_gpu(adev, true);
aaa36a97
AD
2104 if (r)
2105 return r;
00a979f3
WS
2106 }
2107
d38ceaf9 2108 switch (adev->asic_type) {
33f34802
KW
2109#ifdef CONFIG_DRM_AMDGPU_SI
2110 case CHIP_VERDE:
2111 case CHIP_TAHITI:
2112 case CHIP_PITCAIRN:
2113 case CHIP_OLAND:
2114 case CHIP_HAINAN:
295d0daf 2115 adev->family = AMDGPU_FAMILY_SI;
33f34802
KW
2116 r = si_set_ip_blocks(adev);
2117 if (r)
2118 return r;
2119 break;
2120#endif
a2e73f56
AD
2121#ifdef CONFIG_DRM_AMDGPU_CIK
2122 case CHIP_BONAIRE:
2123 case CHIP_HAWAII:
2124 case CHIP_KAVERI:
2125 case CHIP_KABINI:
2126 case CHIP_MULLINS:
e1ad2d53 2127 if (adev->flags & AMD_IS_APU)
a2e73f56 2128 adev->family = AMDGPU_FAMILY_KV;
e1ad2d53
AD
2129 else
2130 adev->family = AMDGPU_FAMILY_CI;
a2e73f56
AD
2131
2132 r = cik_set_ip_blocks(adev);
2133 if (r)
2134 return r;
2135 break;
2136#endif
da87c30b
AD
2137 case CHIP_TOPAZ:
2138 case CHIP_TONGA:
2139 case CHIP_FIJI:
2140 case CHIP_POLARIS10:
2141 case CHIP_POLARIS11:
2142 case CHIP_POLARIS12:
2143 case CHIP_VEGAM:
2144 case CHIP_CARRIZO:
2145 case CHIP_STONEY:
2146 if (adev->flags & AMD_IS_APU)
2147 adev->family = AMDGPU_FAMILY_CZ;
2148 else
2149 adev->family = AMDGPU_FAMILY_VI;
2150
2151 r = vi_set_ip_blocks(adev);
2152 if (r)
2153 return r;
2154 break;
d38ceaf9 2155 default:
63352b7f
AD
2156 r = amdgpu_discovery_set_ip_blocks(adev);
2157 if (r)
2158 return r;
2159 break;
d38ceaf9
AD
2160 }
2161
901e2be2
AD
2162 if (amdgpu_has_atpx() &&
2163 (amdgpu_is_atpx_hybrid() ||
2164 amdgpu_has_atpx_dgpu_power_cntl()) &&
2165 ((adev->flags & AMD_IS_APU) == 0) &&
2166 !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2167 adev->flags |= AMD_IS_PX;
2168
85ac2021
AD
2169 if (!(adev->flags & AMD_IS_APU)) {
2170 parent = pci_upstream_bridge(adev->pdev);
2171 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2172 }
901e2be2 2173
1884734a 2174 amdgpu_amdkfd_device_probe(adev);
2175
3b94fb10 2176 adev->pm.pp_feature = amdgpu_pp_feature_mask;
a35ad98b 2177 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
00544006 2178 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
4215a119
HC
2179 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2180 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
00f54b97 2181
d38ceaf9
AD
2182 for (i = 0; i < adev->num_ip_blocks; i++) {
2183 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
ed8cf00c
HR
2184 DRM_ERROR("disabled ip block: %d <%s>\n",
2185 i, adev->ip_blocks[i].version->funcs->name);
a1255107 2186 adev->ip_blocks[i].status.valid = false;
d38ceaf9 2187 } else {
a1255107
AD
2188 if (adev->ip_blocks[i].version->funcs->early_init) {
2189 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 2190 if (r == -ENOENT) {
a1255107 2191 adev->ip_blocks[i].status.valid = false;
2c1a2784 2192 } else if (r) {
a1255107
AD
2193 DRM_ERROR("early_init of IP block <%s> failed %d\n",
2194 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 2195 return r;
2c1a2784 2196 } else {
a1255107 2197 adev->ip_blocks[i].status.valid = true;
2c1a2784 2198 }
974e6b64 2199 } else {
a1255107 2200 adev->ip_blocks[i].status.valid = true;
d38ceaf9 2201 }
d38ceaf9 2202 }
21a249ca
AD
2203 /* get the vbios after the asic_funcs are set up */
2204 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
6e29c227
AD
2205 r = amdgpu_device_parse_gpu_info_fw(adev);
2206 if (r)
2207 return r;
2208
21a249ca
AD
2209 /* Read BIOS */
2210 if (!amdgpu_get_bios(adev))
2211 return -EINVAL;
2212
2213 r = amdgpu_atombios_init(adev);
2214 if (r) {
2215 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2216 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2217 return r;
2218 }
77eabc6f
PJZ
2219
2220 /* get pf2vf msg info at its earliest time */
2221 if (amdgpu_sriov_vf(adev))
2222 amdgpu_virt_init_data_exchange(adev);
2223
21a249ca 2224 }
d38ceaf9
AD
2225 }
2226
395d1fb9
NH
2227 adev->cg_flags &= amdgpu_cg_mask;
2228 adev->pg_flags &= amdgpu_pg_mask;
2229
d38ceaf9
AD
2230 return 0;
2231}
2232
0a4f2520
RZ
2233static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2234{
2235 int i, r;
2236
2237 for (i = 0; i < adev->num_ip_blocks; i++) {
2238 if (!adev->ip_blocks[i].status.sw)
2239 continue;
2240 if (adev->ip_blocks[i].status.hw)
2241 continue;
2242 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2d11fd3f 2243 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
0a4f2520
RZ
2244 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2245 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2246 if (r) {
2247 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2248 adev->ip_blocks[i].version->funcs->name, r);
2249 return r;
2250 }
2251 adev->ip_blocks[i].status.hw = true;
2252 }
2253 }
2254
2255 return 0;
2256}
2257
2258static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2259{
2260 int i, r;
2261
2262 for (i = 0; i < adev->num_ip_blocks; i++) {
2263 if (!adev->ip_blocks[i].status.sw)
2264 continue;
2265 if (adev->ip_blocks[i].status.hw)
2266 continue;
2267 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2268 if (r) {
2269 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2270 adev->ip_blocks[i].version->funcs->name, r);
2271 return r;
2272 }
2273 adev->ip_blocks[i].status.hw = true;
2274 }
2275
2276 return 0;
2277}
2278
7a3e0bb2
RZ
2279static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2280{
2281 int r = 0;
2282 int i;
80f41f84 2283 uint32_t smu_version;
7a3e0bb2
RZ
2284
2285 if (adev->asic_type >= CHIP_VEGA10) {
2286 for (i = 0; i < adev->num_ip_blocks; i++) {
482f0e53
ML
2287 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2288 continue;
2289
e3c1b071 2290 if (!adev->ip_blocks[i].status.sw)
2291 continue;
2292
482f0e53
ML
2293 /* no need to do the fw loading again if already done*/
2294 if (adev->ip_blocks[i].status.hw == true)
2295 break;
2296
53b3f8f4 2297 if (amdgpu_in_reset(adev) || adev->in_suspend) {
482f0e53
ML
2298 r = adev->ip_blocks[i].version->funcs->resume(adev);
2299 if (r) {
2300 DRM_ERROR("resume of IP block <%s> failed %d\n",
7a3e0bb2 2301 adev->ip_blocks[i].version->funcs->name, r);
482f0e53
ML
2302 return r;
2303 }
2304 } else {
2305 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2306 if (r) {
2307 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2308 adev->ip_blocks[i].version->funcs->name, r);
2309 return r;
7a3e0bb2 2310 }
7a3e0bb2 2311 }
482f0e53
ML
2312
2313 adev->ip_blocks[i].status.hw = true;
2314 break;
7a3e0bb2
RZ
2315 }
2316 }
482f0e53 2317
8973d9ec
ED
2318 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2319 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
7a3e0bb2 2320
80f41f84 2321 return r;
7a3e0bb2
RZ
2322}
2323
5fd8518d
AG
2324static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2325{
2326 long timeout;
2327 int r, i;
2328
2329 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2330 struct amdgpu_ring *ring = adev->rings[i];
2331
2332 /* No need to setup the GPU scheduler for rings that don't need it */
2333 if (!ring || ring->no_scheduler)
2334 continue;
2335
2336 switch (ring->funcs->type) {
2337 case AMDGPU_RING_TYPE_GFX:
2338 timeout = adev->gfx_timeout;
2339 break;
2340 case AMDGPU_RING_TYPE_COMPUTE:
2341 timeout = adev->compute_timeout;
2342 break;
2343 case AMDGPU_RING_TYPE_SDMA:
2344 timeout = adev->sdma_timeout;
2345 break;
2346 default:
2347 timeout = adev->video_timeout;
2348 break;
2349 }
2350
2351 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2352 ring->num_hw_submission, amdgpu_job_hang_limit,
8ab62eda
JG
2353 timeout, adev->reset_domain->wq,
2354 ring->sched_score, ring->name,
2355 adev->dev);
5fd8518d
AG
2356 if (r) {
2357 DRM_ERROR("Failed to create scheduler on ring %s.\n",
2358 ring->name);
2359 return r;
2360 }
2361 }
2362
2363 return 0;
2364}
2365
2366
e3ecdffa
AD
2367/**
2368 * amdgpu_device_ip_init - run init for hardware IPs
2369 *
2370 * @adev: amdgpu_device pointer
2371 *
2372 * Main initialization pass for hardware IPs. The list of all the hardware
2373 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2374 * are run. sw_init initializes the software state associated with each IP
2375 * and hw_init initializes the hardware associated with each IP.
2376 * Returns 0 on success, negative error code on failure.
2377 */
06ec9070 2378static int amdgpu_device_ip_init(struct amdgpu_device *adev)
d38ceaf9
AD
2379{
2380 int i, r;
2381
c030f2e4 2382 r = amdgpu_ras_init(adev);
2383 if (r)
2384 return r;
2385
d38ceaf9 2386 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2387 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 2388 continue;
a1255107 2389 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 2390 if (r) {
a1255107
AD
2391 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2392 adev->ip_blocks[i].version->funcs->name, r);
72d3f592 2393 goto init_failed;
2c1a2784 2394 }
a1255107 2395 adev->ip_blocks[i].status.sw = true;
bfca0289 2396
d38ceaf9 2397 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 2398 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
892deb48
VS
2399 /* Try to reserve bad pages early */
2400 if (amdgpu_sriov_vf(adev))
2401 amdgpu_virt_exchange_data(adev);
2402
06ec9070 2403 r = amdgpu_device_vram_scratch_init(adev);
2c1a2784
AD
2404 if (r) {
2405 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
72d3f592 2406 goto init_failed;
2c1a2784 2407 }
a1255107 2408 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
2409 if (r) {
2410 DRM_ERROR("hw_init %d failed %d\n", i, r);
72d3f592 2411 goto init_failed;
2c1a2784 2412 }
06ec9070 2413 r = amdgpu_device_wb_init(adev);
2c1a2784 2414 if (r) {
06ec9070 2415 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
72d3f592 2416 goto init_failed;
2c1a2784 2417 }
a1255107 2418 adev->ip_blocks[i].status.hw = true;
2493664f
ML
2419
2420 /* right after GMC hw init, we create CSA */
f92d5c61 2421 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
1e256e27
RZ
2422 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2423 AMDGPU_GEM_DOMAIN_VRAM,
2424 AMDGPU_CSA_SIZE);
2493664f
ML
2425 if (r) {
2426 DRM_ERROR("allocate CSA failed %d\n", r);
72d3f592 2427 goto init_failed;
2493664f
ML
2428 }
2429 }
d38ceaf9
AD
2430 }
2431 }
2432
c9ffa427 2433 if (amdgpu_sriov_vf(adev))
22c16d25 2434 amdgpu_virt_init_data_exchange(adev);
c9ffa427 2435
533aed27
AG
2436 r = amdgpu_ib_pool_init(adev);
2437 if (r) {
2438 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2439 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2440 goto init_failed;
2441 }
2442
c8963ea4
RZ
2443 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2444 if (r)
72d3f592 2445 goto init_failed;
0a4f2520
RZ
2446
2447 r = amdgpu_device_ip_hw_init_phase1(adev);
2448 if (r)
72d3f592 2449 goto init_failed;
0a4f2520 2450
7a3e0bb2
RZ
2451 r = amdgpu_device_fw_loading(adev);
2452 if (r)
72d3f592 2453 goto init_failed;
7a3e0bb2 2454
0a4f2520
RZ
2455 r = amdgpu_device_ip_hw_init_phase2(adev);
2456 if (r)
72d3f592 2457 goto init_failed;
d38ceaf9 2458
121a2bc6
AG
2459 /*
2460 * retired pages will be loaded from eeprom and reserved here,
2461 * it should be called after amdgpu_device_ip_hw_init_phase2 since
2462 * for some ASICs the RAS EEPROM code relies on SMU fully functioning
2463 * for I2C communication, which is only true at this point.
b82e65a9
GC
2464 *
2465 * amdgpu_ras_recovery_init may fail, but the upper layers only care
2466 * about failures caused by a bad gpu situation and stop the amdgpu
2467 * init process accordingly. For other failures, it still releases all
2468 * the resources and prints an error message, rather than returning a
2469 * negative value to the upper level.
121a2bc6
AG
2470 *
2471 * Note: theoretically, this should be called before all vram allocations
2472 * to protect retired pages from being abused
2473 */
b82e65a9
GC
2474 r = amdgpu_ras_recovery_init(adev);
2475 if (r)
2476 goto init_failed;
121a2bc6 2477
cfbb6b00
AG
2478 /**
2479 * In case of XGMI grab extra reference for reset domain for this device
2480 */
a4c63caf 2481 if (adev->gmc.xgmi.num_physical_nodes > 1) {
cfbb6b00
AG
2482 if (amdgpu_xgmi_add_device(adev) == 0) {
2483 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
a4c63caf 2484
cfbb6b00
AG
2485 if (!hive->reset_domain ||
2486 !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2487 r = -ENOENT;
2488 goto init_failed;
2489 }
e3c1b071 2490
cfbb6b00
AG
2491 /* Drop the early temporary reset domain we created for device */
2492 amdgpu_reset_put_reset_domain(adev->reset_domain);
2493 adev->reset_domain = hive->reset_domain;
a4c63caf
AG
2494 }
2495 }
2496
5fd8518d
AG
2497 r = amdgpu_device_init_schedulers(adev);
2498 if (r)
2499 goto init_failed;
e3c1b071 2500
2501 /* Don't init kfd if whole hive need to be reset during init */
2502 if (!adev->gmc.xgmi.pending_reset)
2503 amdgpu_amdkfd_device_init(adev);
c6332b97 2504
bd607166
KR
2505 amdgpu_fru_get_product_info(adev);
2506
72d3f592 2507init_failed:
c9ffa427 2508 if (amdgpu_sriov_vf(adev))
c6332b97 2509 amdgpu_virt_release_full_gpu(adev, true);
2510
72d3f592 2511 return r;
d38ceaf9
AD
2512}
2513
e3ecdffa
AD
2514/**
2515 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2516 *
2517 * @adev: amdgpu_device pointer
2518 *
2519 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2520 * this function before a GPU reset. If the value is retained after a
2521 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2522 */
06ec9070 2523static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
0c49e0b8
CZ
2524{
2525 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2526}
2527
e3ecdffa
AD
2528/**
2529 * amdgpu_device_check_vram_lost - check if vram is valid
2530 *
2531 * @adev: amdgpu_device pointer
2532 *
2533 * Checks the reset magic value written to the gart pointer in VRAM.
2534 * The driver calls this after a GPU reset to see if the contents of
2535 * VRAM are lost or not.
2536 * returns true if vram is lost, false if not.
2537 */
06ec9070 2538static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
0c49e0b8 2539{
dadce777
EQ
2540 if (memcmp(adev->gart.ptr, adev->reset_magic,
2541 AMDGPU_RESET_MAGIC_NUM))
2542 return true;
2543
53b3f8f4 2544 if (!amdgpu_in_reset(adev))
dadce777
EQ
2545 return false;
2546
2547 /*
2548 * For all ASICs with baco/mode1 reset, the VRAM is
2549 * always assumed to be lost.
2550 */
2551 switch (amdgpu_asic_reset_method(adev)) {
2552 case AMD_RESET_METHOD_BACO:
2553 case AMD_RESET_METHOD_MODE1:
2554 return true;
2555 default:
2556 return false;
2557 }
0c49e0b8
CZ
2558}
2559
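/*
 * A sketch (editorial, with a hypothetical wrapper; the actual reset
 * sequence is elided) of how the two helpers above pair up around a reset.
 */
static bool example_reset_and_check_vram(struct amdgpu_device *adev)
{
	amdgpu_device_fill_reset_magic(adev);		/* before the reset */
	/* ... ASIC reset happens here ... */
	return amdgpu_device_check_vram_lost(adev);	/* after the reset */
}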
e3ecdffa 2560/**
1112a46b 2561 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
e3ecdffa
AD
2562 *
2563 * @adev: amdgpu_device pointer
b8b72130 2564 * @state: clockgating state (gate or ungate)
e3ecdffa 2565 *
e3ecdffa 2566 * The list of all the hardware IPs that make up the asic is walked and the
1112a46b
RZ
2567 * set_clockgating_state callbacks are run.
2568 * Late initialization pass enabling clockgating for hardware IPs.
2569 * Fini or suspend, pass disabling clockgating for hardware IPs.
e3ecdffa
AD
2570 * Returns 0 on success, negative error code on failure.
2571 */
fdd34271 2572
5d89bb2d
LL
2573int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2574 enum amd_clockgating_state state)
d38ceaf9 2575{
1112a46b 2576 int i, j, r;
d38ceaf9 2577
4a2ba394
SL
2578 if (amdgpu_emu_mode == 1)
2579 return 0;
2580
1112a46b
RZ
2581 for (j = 0; j < adev->num_ip_blocks; j++) {
2582 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
a2d31dc3 2583 if (!adev->ip_blocks[i].status.late_initialized)
d38ceaf9 2584 continue;
5d70a549
PV
2585 /* skip CG for GFX on S0ix */
2586 if (adev->in_s0ix &&
2587 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2588 continue;
4a446d55 2589 /* skip CG for VCE/UVD, it's handled specially */
a1255107 2590 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
57716327 2591 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
34319b32 2592 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
52f2e779 2593 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
57716327 2594 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
4a446d55 2595 /* enable clockgating to save power */
a1255107 2596 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1112a46b 2597 state);
4a446d55
AD
2598 if (r) {
2599 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 2600 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
2601 return r;
2602 }
b0b00ff1 2603 }
d38ceaf9 2604 }
06b18f61 2605
c9f96fd5
RZ
2606 return 0;
2607}
2608
5d89bb2d
LL
2609int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2610 enum amd_powergating_state state)
c9f96fd5 2611{
1112a46b 2612 int i, j, r;
06b18f61 2613
c9f96fd5
RZ
2614 if (amdgpu_emu_mode == 1)
2615 return 0;
2616
1112a46b
RZ
2617 for (j = 0; j < adev->num_ip_blocks; j++) {
2618 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
a2d31dc3 2619 if (!adev->ip_blocks[i].status.late_initialized)
c9f96fd5 2620 continue;
5d70a549
PV
2621 /* skip PG for GFX on S0ix */
2622 if (adev->in_s0ix &&
2623 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2624 continue;
c9f96fd5
RZ
2625 /* skip CG for VCE/UVD, it's handled specially */
2626 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2627 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2628 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
52f2e779 2629 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
c9f96fd5
RZ
2630 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2631 /* enable powergating to save power */
2632 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
1112a46b 2633 state);
c9f96fd5
RZ
2634 if (r) {
2635 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2636 adev->ip_blocks[i].version->funcs->name, r);
2637 return r;
2638 }
2639 }
2640 }
2dc80b00
S
2641 return 0;
2642}
2643
beff74bc
AD
2644static int amdgpu_device_enable_mgpu_fan_boost(void)
2645{
2646 struct amdgpu_gpu_instance *gpu_ins;
2647 struct amdgpu_device *adev;
2648 int i, ret = 0;
2649
2650 mutex_lock(&mgpu_info.mutex);
2651
2652 /*
2653 * MGPU fan boost feature should be enabled
2654 * only when there are two or more dGPUs in
2655 * the system
2656 */
2657 if (mgpu_info.num_dgpu < 2)
2658 goto out;
2659
2660 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2661 gpu_ins = &(mgpu_info.gpu_ins[i]);
2662 adev = gpu_ins->adev;
2663 if (!(adev->flags & AMD_IS_APU) &&
f10bb940 2664 !gpu_ins->mgpu_fan_enabled) {
beff74bc
AD
2665 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2666 if (ret)
2667 break;
2668
2669 gpu_ins->mgpu_fan_enabled = 1;
2670 }
2671 }
2672
2673out:
2674 mutex_unlock(&mgpu_info.mutex);
2675
2676 return ret;
2677}
2678
e3ecdffa
AD
2679/**
2680 * amdgpu_device_ip_late_init - run late init for hardware IPs
2681 *
2682 * @adev: amdgpu_device pointer
2683 *
2684 * Late initialization pass for hardware IPs. The list of all the hardware
2685 * IPs that make up the asic is walked and the late_init callbacks are run.
2686 * late_init covers any special initialization that an IP requires
2687 * after all of them have been initialized or something that needs to happen
2688 * late in the init process.
2689 * Returns 0 on success, negative error code on failure.
2690 */
06ec9070 2691static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2dc80b00 2692{
60599a03 2693 struct amdgpu_gpu_instance *gpu_instance;
2dc80b00
S
2694 int i = 0, r;
2695
2696 for (i = 0; i < adev->num_ip_blocks; i++) {
73f847db 2697 if (!adev->ip_blocks[i].status.hw)
2dc80b00
S
2698 continue;
2699 if (adev->ip_blocks[i].version->funcs->late_init) {
2700 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2701 if (r) {
2702 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2703 adev->ip_blocks[i].version->funcs->name, r);
2704 return r;
2705 }
2dc80b00 2706 }
73f847db 2707 adev->ip_blocks[i].status.late_initialized = true;
2dc80b00
S
2708 }
2709
867e24ca 2710 r = amdgpu_ras_late_init(adev);
2711 if (r) {
2712 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2713 return r;
2714 }
2715
a891d239
DL
2716 amdgpu_ras_set_error_query_ready(adev, true);
2717
1112a46b
RZ
2718 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2719 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
916ac57f 2720
06ec9070 2721 amdgpu_device_fill_reset_magic(adev);
d38ceaf9 2722
beff74bc
AD
2723 r = amdgpu_device_enable_mgpu_fan_boost();
2724 if (r)
2725 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2726
4da8b639 2727 /* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
2728 if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1)||
2729 adev->asic_type == CHIP_ALDEBARAN ))
bc143d8b 2730 amdgpu_dpm_handle_passthrough_sbr(adev, true);
60599a03
EQ
2731
2732 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2733 mutex_lock(&mgpu_info.mutex);
2734
2735 /*
2736 * Reset device p-state to low as this was booted with high.
2737 *
2738 * This should be performed only after all devices from the same
2739 * hive get initialized.
2740 *
2741 * However, the number of devices in a hive is not known in advance,
2742 * as it is counted one by one during device initialization.
2743 *
2744 * So, we wait for all XGMI interlinked devices initialized.
2745 * This may bring some delays as those devices may come from
2746 * different hives. But that should be OK.
2747 */
2748 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2749 for (i = 0; i < mgpu_info.num_gpu; i++) {
2750 gpu_instance = &(mgpu_info.gpu_ins[i]);
2751 if (gpu_instance->adev->flags & AMD_IS_APU)
2752 continue;
2753
d84a430d
JK
2754 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2755 AMDGPU_XGMI_PSTATE_MIN);
60599a03
EQ
2756 if (r) {
2757 DRM_ERROR("pstate setting failed (%d).\n", r);
2758 break;
2759 }
2760 }
2761 }
2762
2763 mutex_unlock(&mgpu_info.mutex);
2764 }
2765
d38ceaf9
AD
2766 return 0;
2767}
2768
613aa3ea
LY
2769/**
2770 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2771 *
2772 * @adev: amdgpu_device pointer
2773 *
2774 * For ASICs need to disable SMC first
2775 */
2776static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2777{
2778 int i, r;
2779
2780 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2781 return;
2782
2783 for (i = 0; i < adev->num_ip_blocks; i++) {
2784 if (!adev->ip_blocks[i].status.hw)
2785 continue;
2786 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2787 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2788 /* XXX handle errors */
2789 if (r) {
2790 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2791 adev->ip_blocks[i].version->funcs->name, r);
2792 }
2793 adev->ip_blocks[i].status.hw = false;
2794 break;
2795 }
2796 }
2797}
2798
e9669fb7 2799static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
d38ceaf9
AD
2800{
2801 int i, r;
2802
e9669fb7
AG
2803 for (i = 0; i < adev->num_ip_blocks; i++) {
2804 if (!adev->ip_blocks[i].version->funcs->early_fini)
2805 continue;
5278a159 2806
e9669fb7
AG
2807 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2808 if (r) {
2809 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2810 adev->ip_blocks[i].version->funcs->name, r);
2811 }
2812 }
c030f2e4 2813
05df1f01 2814 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
fdd34271
RZ
2815 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2816
7270e895
TY
2817 amdgpu_amdkfd_suspend(adev, false);
2818
613aa3ea
LY
2819 /* Workaround for ASICs that need to disable SMC first */
2820 amdgpu_device_smu_fini_early(adev);
3e96dbfd 2821
d38ceaf9 2822 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 2823 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 2824 continue;
8201a67a 2825
a1255107 2826 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 2827 /* XXX handle errors */
2c1a2784 2828 if (r) {
a1255107
AD
2829 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2830 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 2831 }
8201a67a 2832
a1255107 2833 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
2834 }
2835
6effad8a
GC
2836 if (amdgpu_sriov_vf(adev)) {
2837 if (amdgpu_virt_release_full_gpu(adev, false))
2838 DRM_ERROR("failed to release exclusive mode on fini\n");
2839 }
2840
e9669fb7
AG
2841 return 0;
2842}
2843
2844/**
2845 * amdgpu_device_ip_fini - run fini for hardware IPs
2846 *
2847 * @adev: amdgpu_device pointer
2848 *
2849 * Main teardown pass for hardware IPs. The list of all the hardware
2850 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2851 * are run. hw_fini tears down the hardware associated with each IP
2852 * and sw_fini tears down any software state associated with each IP.
2853 * Returns 0 on success, negative error code on failure.
2854 */
2855static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2856{
2857 int i, r;
2858
2859 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2860 amdgpu_virt_release_ras_err_handler_data(adev);
2861
e9669fb7
AG
2862 if (adev->gmc.xgmi.num_physical_nodes > 1)
2863 amdgpu_xgmi_remove_device(adev);
2864
2865 amdgpu_amdkfd_device_fini_sw(adev);
9950cda2 2866
d38ceaf9 2867 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 2868 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 2869 continue;
c12aba3a
ML
2870
2871 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
c8963ea4 2872 amdgpu_ucode_free_bo(adev);
1e256e27 2873 amdgpu_free_static_csa(&adev->virt.csa_obj);
c12aba3a
ML
2874 amdgpu_device_wb_fini(adev);
2875 amdgpu_device_vram_scratch_fini(adev);
533aed27 2876 amdgpu_ib_pool_fini(adev);
c12aba3a
ML
2877 }
2878
a1255107 2879 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 2880 /* XXX handle errors */
2c1a2784 2881 if (r) {
a1255107
AD
2882 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2883 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 2884 }
a1255107
AD
2885 adev->ip_blocks[i].status.sw = false;
2886 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
2887 }
2888
a6dcfd9c 2889 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 2890 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 2891 continue;
a1255107
AD
2892 if (adev->ip_blocks[i].version->funcs->late_fini)
2893 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2894 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
2895 }
2896
c030f2e4 2897 amdgpu_ras_fini(adev);
2898
d38ceaf9
AD
2899 return 0;
2900}
2901
e3ecdffa 2902/**
beff74bc 2903 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
e3ecdffa 2904 *
1112a46b 2905 * @work: work_struct.
e3ecdffa 2906 */
beff74bc 2907static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2dc80b00
S
2908{
2909 struct amdgpu_device *adev =
beff74bc 2910 container_of(work, struct amdgpu_device, delayed_init_work.work);
916ac57f
RZ
2911 int r;
2912
2913 r = amdgpu_ib_ring_tests(adev);
2914 if (r)
2915 DRM_ERROR("ib ring test failed (%d).\n", r);
2dc80b00
S
2916}
2917
1e317b99
RZ
2918static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2919{
2920 struct amdgpu_device *adev =
2921 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2922
90a92662
MD
2923 WARN_ON_ONCE(adev->gfx.gfx_off_state);
2924 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2925
2926 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2927 adev->gfx.gfx_off_state = true;
1e317b99
RZ
2928}
2929
e3ecdffa 2930/**
e7854a03 2931 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
e3ecdffa
AD
2932 *
2933 * @adev: amdgpu_device pointer
2934 *
2935 * Main suspend function for hardware IPs. The list of all the hardware
2936 * IPs that make up the asic is walked, clockgating is disabled and the
2937 * suspend callbacks are run. suspend puts the hardware and software state
2938 * in each IP into a state suitable for suspend.
2939 * Returns 0 on success, negative error code on failure.
2940 */
e7854a03
AD
2941static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2942{
2943 int i, r;
2944
50ec83f0
AD
2945 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2946 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
05df1f01 2947
e7854a03
AD
2948 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2949 if (!adev->ip_blocks[i].status.valid)
2950 continue;
2b9f7848 2951
e7854a03 2952 /* displays are handled separately */
2b9f7848
ND
2953 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2954 continue;
2955
2956 /* XXX handle errors */
2957 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2958 /* XXX handle errors */
2959 if (r) {
2960 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2961 adev->ip_blocks[i].version->funcs->name, r);
2962 return r;
e7854a03 2963 }
2b9f7848
ND
2964
2965 adev->ip_blocks[i].status.hw = false;
e7854a03
AD
2966 }
2967
e7854a03
AD
2968 return 0;
2969}
2970
2971/**
2972 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2973 *
2974 * @adev: amdgpu_device pointer
2975 *
2976 * Main suspend function for hardware IPs. The list of all the hardware
2977 * IPs that make up the asic is walked, clockgating is disabled and the
2978 * suspend callbacks are run. suspend puts the hardware and software state
2979 * in each IP into a state suitable for suspend.
2980 * Returns 0 on success, negative error code on failure.
2981 */
2982static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
2983{
2984 int i, r;
2985
557f42a2 2986 if (adev->in_s0ix)
bc143d8b 2987 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
34416931 2988
d38ceaf9 2989 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 2990 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 2991 continue;
e7854a03
AD
2992 /* displays are handled in phase1 */
2993 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2994 continue;
bff77e86
LM
2995 /* PSP lost connection when err_event_athub occurs */
2996 if (amdgpu_ras_intr_triggered() &&
2997 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2998 adev->ip_blocks[i].status.hw = false;
2999 continue;
3000 }
e3c1b071 3001
3002 /* skip unnecessary suspend if we do not initialize them yet */
3003 if (adev->gmc.xgmi.pending_reset &&
3004 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3005 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3006 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3007 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3008 adev->ip_blocks[i].status.hw = false;
3009 continue;
3010 }
557f42a2 3011
32ff160d
AD
3012 /* skip suspend of gfx and psp for S0ix
3013 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3014 * like at runtime. PSP is also part of the always on hardware
3015 * so no need to suspend it.
3016 */
557f42a2 3017 if (adev->in_s0ix &&
32ff160d
AD
3018 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3019 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
557f42a2
AD
3020 continue;
3021
d38ceaf9 3022 /* XXX handle errors */
a1255107 3023 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 3024 /* XXX handle errors */
2c1a2784 3025 if (r) {
a1255107
AD
3026 DRM_ERROR("suspend of IP block <%s> failed %d\n",
3027 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 3028 }
876923fb 3029 adev->ip_blocks[i].status.hw = false;
a3a09142 3030 /* handle putting the SMC in the appropriate state */
86b93fd6
JZ
3031 if (!amdgpu_sriov_vf(adev)) {
3032 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3033 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3034 if (r) {
3035 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3036 adev->mp1_state, r);
3037 return r;
3038 }
a3a09142
AD
3039 }
3040 }
d38ceaf9
AD
3041 }
3042
3043 return 0;
3044}
3045
e7854a03
AD
3046/**
3047 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3048 *
3049 * @adev: amdgpu_device pointer
3050 *
3051 * Main suspend function for hardware IPs. The list of all the hardware
3052 * IPs that make up the asic is walked, clockgating is disabled and the
3053 * suspend callbacks are run. suspend puts the hardware and software state
3054 * in each IP into a state suitable for suspend.
3055 * Returns 0 on success, negative error code on failure.
3056 */
3057int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3058{
3059 int r;
3060
3c73683c
JC
3061 if (amdgpu_sriov_vf(adev)) {
3062 amdgpu_virt_fini_data_exchange(adev);
e7819644 3063 amdgpu_virt_request_full_gpu(adev, false);
3c73683c 3064 }
e7819644 3065
e7854a03
AD
3066 r = amdgpu_device_ip_suspend_phase1(adev);
3067 if (r)
3068 return r;
3069 r = amdgpu_device_ip_suspend_phase2(adev);
3070
e7819644
YT
3071 if (amdgpu_sriov_vf(adev))
3072 amdgpu_virt_release_full_gpu(adev, false);
3073
e7854a03
AD
3074 return r;
3075}
3076
06ec9070 3077static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
3078{
3079 int i, r;
3080
2cb681b6
ML
3081 static enum amd_ip_block_type ip_order[] = {
3082 AMD_IP_BLOCK_TYPE_GMC,
3083 AMD_IP_BLOCK_TYPE_COMMON,
39186aef 3084 AMD_IP_BLOCK_TYPE_PSP,
2cb681b6
ML
3085 AMD_IP_BLOCK_TYPE_IH,
3086 };
a90ad3c2 3087
95ea3dbc 3088 for (i = 0; i < adev->num_ip_blocks; i++) {
2cb681b6
ML
3089 int j;
3090 struct amdgpu_ip_block *block;
a90ad3c2 3091
4cd2a96d
J
3092 block = &adev->ip_blocks[i];
3093 block->status.hw = false;
2cb681b6 3094
4cd2a96d 3095 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2cb681b6 3096
4cd2a96d 3097 if (block->version->type != ip_order[j] ||
2cb681b6
ML
3098 !block->status.valid)
3099 continue;
3100
3101 r = block->version->funcs->hw_init(adev);
0aaeefcc 3102 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
c41d1cf6
ML
3103 if (r)
3104 return r;
482f0e53 3105 block->status.hw = true;
a90ad3c2
ML
3106 }
3107 }
3108
3109 return 0;
3110}
3111
06ec9070 3112static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
3113{
3114 int i, r;
3115
2cb681b6
ML
3116 static enum amd_ip_block_type ip_order[] = {
3117 AMD_IP_BLOCK_TYPE_SMC,
3118 AMD_IP_BLOCK_TYPE_DCE,
3119 AMD_IP_BLOCK_TYPE_GFX,
3120 AMD_IP_BLOCK_TYPE_SDMA,
257deb8c 3121 AMD_IP_BLOCK_TYPE_UVD,
d83c7a07
JJ
3122 AMD_IP_BLOCK_TYPE_VCE,
3123 AMD_IP_BLOCK_TYPE_VCN
2cb681b6 3124 };
a90ad3c2 3125
2cb681b6
ML
3126 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3127 int j;
3128 struct amdgpu_ip_block *block;
a90ad3c2 3129
2cb681b6
ML
3130 for (j = 0; j < adev->num_ip_blocks; j++) {
3131 block = &adev->ip_blocks[j];
3132
3133 if (block->version->type != ip_order[i] ||
482f0e53
ML
3134 !block->status.valid ||
3135 block->status.hw)
2cb681b6
ML
3136 continue;
3137
895bd048
JZ
3138 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3139 r = block->version->funcs->resume(adev);
3140 else
3141 r = block->version->funcs->hw_init(adev);
3142
0aaeefcc 3143 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
c41d1cf6
ML
3144 if (r)
3145 return r;
482f0e53 3146 block->status.hw = true;
a90ad3c2
ML
3147 }
3148 }
3149
3150 return 0;
3151}
3152
e3ecdffa
AD
3153/**
3154 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3155 *
3156 * @adev: amdgpu_device pointer
3157 *
3158 * First resume function for hardware IPs. The list of all the hardware
3159 * IPs that make up the asic is walked and the resume callbacks are run for
3160 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3161 * after a suspend and updates the software state as necessary. This
3162 * function is also used for restoring the GPU after a GPU reset.
3163 * Returns 0 on success, negative error code on failure.
3164 */
06ec9070 3165static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
d38ceaf9
AD
3166{
3167 int i, r;
3168
a90ad3c2 3169 for (i = 0; i < adev->num_ip_blocks; i++) {
482f0e53 3170 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
a90ad3c2 3171 continue;
a90ad3c2 3172 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
e3ecdffa
AD
3173 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3174 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
482f0e53 3175
fcf0649f
CZ
3176 r = adev->ip_blocks[i].version->funcs->resume(adev);
3177 if (r) {
3178 DRM_ERROR("resume of IP block <%s> failed %d\n",
3179 adev->ip_blocks[i].version->funcs->name, r);
3180 return r;
3181 }
482f0e53 3182 adev->ip_blocks[i].status.hw = true;
a90ad3c2
ML
3183 }
3184 }
3185
3186 return 0;
3187}
3188
e3ecdffa
AD
3189/**
3190 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3191 *
3192 * @adev: amdgpu_device pointer
3193 *
3194 * Second resume function for hardware IPs. The list of all the hardware
3195 * IPs that make up the asic is walked and the resume callbacks are run for
3196 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
3197 * functional state after a suspend and updates the software state as
3198 * necessary. This function is also used for restoring the GPU after a GPU
3199 * reset.
3200 * Returns 0 on success, negative error code on failure.
3201 */
06ec9070 3202static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
3203{
3204 int i, r;
3205
3206 for (i = 0; i < adev->num_ip_blocks; i++) {
482f0e53 3207 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
d38ceaf9 3208 continue;
fcf0649f 3209 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
e3ecdffa 3210 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
7a3e0bb2
RZ
3211 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3212 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
fcf0649f 3213 continue;
a1255107 3214 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 3215 if (r) {
a1255107
AD
3216 DRM_ERROR("resume of IP block <%s> failed %d\n",
3217 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 3218 return r;
2c1a2784 3219 }
482f0e53 3220 adev->ip_blocks[i].status.hw = true;
d38ceaf9
AD
3221 }
3222
3223 return 0;
3224}
3225
e3ecdffa
AD
3226/**
3227 * amdgpu_device_ip_resume - run resume for hardware IPs
3228 *
3229 * @adev: amdgpu_device pointer
3230 *
3231 * Main resume function for hardware IPs. The hardware IPs
3232 * are split into two resume functions because they are
3233 * also used in recovering from a GPU reset and some additional
3234 * steps need to be taken between them. In this case (S3/S4) they are
3235 * run sequentially.
3236 * Returns 0 on success, negative error code on failure.
3237 */
06ec9070 3238static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
fcf0649f
CZ
3239{
3240 int r;
3241
9cec53c1
JZ
3242 r = amdgpu_amdkfd_resume_iommu(adev);
3243 if (r)
3244 return r;
3245
06ec9070 3246 r = amdgpu_device_ip_resume_phase1(adev);
fcf0649f
CZ
3247 if (r)
3248 return r;
7a3e0bb2
RZ
3249
3250 r = amdgpu_device_fw_loading(adev);
3251 if (r)
3252 return r;
3253
06ec9070 3254 r = amdgpu_device_ip_resume_phase2(adev);
fcf0649f
CZ
3255
3256 return r;
3257}
3258
e3ecdffa
AD
3259/**
3260 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3261 *
3262 * @adev: amdgpu_device pointer
3263 *
3264 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3265 */
4e99a44e 3266static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 3267{
6867e1b5
ML
3268 if (amdgpu_sriov_vf(adev)) {
3269 if (adev->is_atom_fw) {
58ff791a 3270 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
6867e1b5
ML
3271 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3272 } else {
3273 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3274 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3275 }
3276
3277 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3278 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 3279 }
048765ad
AR
3280}
3281
e3ecdffa
AD
3282/**
3283 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3284 *
3285 * @asic_type: AMD asic type
3286 *
3287 * Check if there is DC (new modesetting infrastructure) support for an asic.
3288 * returns true if DC has support, false if not.
3289 */
4562236b
HW
3290bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3291{
3292 switch (asic_type) {
0637d417
AD
3293#ifdef CONFIG_DRM_AMDGPU_SI
3294 case CHIP_HAINAN:
3295#endif
3296 case CHIP_TOPAZ:
3297 /* chips with no display hardware */
3298 return false;
4562236b 3299#if defined(CONFIG_DRM_AMD_DC)
64200c46
MR
3300 case CHIP_TAHITI:
3301 case CHIP_PITCAIRN:
3302 case CHIP_VERDE:
3303 case CHIP_OLAND:
2d32ffd6
AD
3304 /*
3305 * We have systems in the wild with these ASICs that require
3306 * LVDS and VGA support which is not supported with DC.
3307 *
3308 * Fallback to the non-DC driver here by default so as not to
3309 * cause regressions.
3310 */
3311#if defined(CONFIG_DRM_AMD_DC_SI)
3312 return amdgpu_dc > 0;
3313#else
3314 return false;
64200c46 3315#endif
4562236b 3316 case CHIP_BONAIRE:
0d6fbccb 3317 case CHIP_KAVERI:
367e6687
AD
3318 case CHIP_KABINI:
3319 case CHIP_MULLINS:
d9fda248
HW
3320 /*
3321 * We have systems in the wild with these ASICs that require
3322 * LVDS and VGA support which is not supported with DC.
3323 *
3324 * Fallback to the non-DC driver here by default so as not to
3325 * cause regressions.
3326 */
3327 return amdgpu_dc > 0;
3328 case CHIP_HAWAII:
4562236b
HW
3329 case CHIP_CARRIZO:
3330 case CHIP_STONEY:
4562236b 3331 case CHIP_POLARIS10:
675fd32b 3332 case CHIP_POLARIS11:
2c8ad2d5 3333 case CHIP_POLARIS12:
675fd32b 3334 case CHIP_VEGAM:
4562236b
HW
3335 case CHIP_TONGA:
3336 case CHIP_FIJI:
42f8ffa1 3337 case CHIP_VEGA10:
dca7b401 3338 case CHIP_VEGA12:
c6034aa2 3339 case CHIP_VEGA20:
b86a1aa3 3340#if defined(CONFIG_DRM_AMD_DC_DCN)
fd187853 3341 case CHIP_RAVEN:
b4f199c7 3342 case CHIP_NAVI10:
8fceceb6 3343 case CHIP_NAVI14:
078655d9 3344 case CHIP_NAVI12:
e1c14c43 3345 case CHIP_RENOIR:
3f68c01b 3346 case CHIP_CYAN_SKILLFISH:
81d9bfb8 3347 case CHIP_SIENNA_CICHLID:
a6c5308f 3348 case CHIP_NAVY_FLOUNDER:
7cc656e2 3349 case CHIP_DIMGREY_CAVEFISH:
ddaed58b 3350 case CHIP_BEIGE_GOBY:
84b934bc 3351 case CHIP_VANGOGH:
c8b73f7f 3352 case CHIP_YELLOW_CARP:
42f8ffa1 3353#endif
f7f12b25 3354 default:
fd187853 3355 return amdgpu_dc != 0;
f7f12b25 3356#else
4562236b 3357 default:
93b09a9a 3358 if (amdgpu_dc > 0)
044a48f4 3359 DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
93b09a9a 3360 "but isn't supported by ASIC, ignoring\n");
4562236b 3361 return false;
f7f12b25 3362#endif
4562236b
HW
3363 }
3364}
3365
3366/**
3367 * amdgpu_device_has_dc_support - check if dc is supported
3368 *
982a820b 3369 * @adev: amdgpu_device pointer
4562236b
HW
3370 *
3371 * Returns true for supported, false for not supported
3372 */
3373bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3374{
abaf210c
AS
3375 if (amdgpu_sriov_vf(adev) ||
3376 adev->enable_virtual_display ||
3377 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
2555039d
XY
3378 return false;
3379
4562236b
HW
3380 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3381}
3382
d4535e2c
AG
3383static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3384{
3385 struct amdgpu_device *adev =
3386 container_of(__work, struct amdgpu_device, xgmi_reset_work);
d95e8e97 3387 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
d4535e2c 3388
c6a6e2db
AG
3389 /* It's a bug to not have a hive within this function */
3390 if (WARN_ON(!hive))
3391 return;
3392
3393 /*
3394 * Use task barrier to synchronize all xgmi reset works across the
3395 * hive. task_barrier_enter and task_barrier_exit will block
3396 * until all the threads running the xgmi reset works reach
3397 * those points. task_barrier_full will do both blocks.
3398 */
3399 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3400
3401 task_barrier_enter(&hive->tb);
4a580877 3402 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
c6a6e2db
AG
3403
3404 if (adev->asic_reset_res)
3405 goto fail;
3406
3407 task_barrier_exit(&hive->tb);
4a580877 3408 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
c6a6e2db
AG
3409
3410 if (adev->asic_reset_res)
3411 goto fail;
43c4d576 3412
5e67bba3 3413 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3414 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3415 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
c6a6e2db
AG
3416 } else {
3417
3418 task_barrier_full(&hive->tb);
3419 adev->asic_reset_res = amdgpu_asic_reset(adev);
3420 }
ce316fa5 3421
c6a6e2db 3422fail:
d4535e2c 3423 if (adev->asic_reset_res)
fed184e9 3424 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
4a580877 3425 adev->asic_reset_res, adev_to_drm(adev)->unique);
d95e8e97 3426 amdgpu_put_xgmi_hive(hive);
d4535e2c
AG
3427}
3428
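/*
 * Illustrative sketch (not part of the driver): the task-barrier pattern
 * used above. Every device in the hive runs one such worker; enter/exit
 * keep a two-phase operation (BACO enter, then BACO exit) in lock-step
 * across all workers, while task_barrier_full() is the one-phase form.
 */
static void example_two_phase_reset_worker(struct task_barrier *tb)
{
	task_barrier_enter(tb);		/* block until all workers arrive */
	/* phase 1, e.g. BACO enter */
	task_barrier_exit(tb);		/* re-sync before the second phase */
	/* phase 2, e.g. BACO exit */
}
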
71f98027
AD
3429static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3430{
3431 char *input = amdgpu_lockup_timeout;
3432 char *timeout_setting = NULL;
3433 int index = 0;
3434 long timeout;
3435 int ret = 0;
3436
3437 /*
67387dfe
AD
 3438	 * By default the timeout for non-compute jobs is 10000
 3439	 * and 60000 for compute jobs.
71f98027 3440	 * In SR-IOV or passthrough mode, the timeout for compute
b7b2a316 3441	 * jobs is 60000 by default.
71f98027
AD
3442 */
3443 adev->gfx_timeout = msecs_to_jiffies(10000);
3444 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
9882e278
ED
3445 if (amdgpu_sriov_vf(adev))
3446 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3447 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
71f98027 3448 else
67387dfe 3449 adev->compute_timeout = msecs_to_jiffies(60000);
71f98027 3450
f440ff44 3451 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
71f98027 3452 while ((timeout_setting = strsep(&input, ",")) &&
f440ff44 3453 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
71f98027
AD
3454 ret = kstrtol(timeout_setting, 0, &timeout);
3455 if (ret)
3456 return ret;
3457
3458 if (timeout == 0) {
3459 index++;
3460 continue;
3461 } else if (timeout < 0) {
3462 timeout = MAX_SCHEDULE_TIMEOUT;
127aedf9
CK
3463 dev_warn(adev->dev, "lockup timeout disabled");
3464 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
71f98027
AD
3465 } else {
3466 timeout = msecs_to_jiffies(timeout);
3467 }
3468
3469 switch (index++) {
3470 case 0:
3471 adev->gfx_timeout = timeout;
3472 break;
3473 case 1:
3474 adev->compute_timeout = timeout;
3475 break;
3476 case 2:
3477 adev->sdma_timeout = timeout;
3478 break;
3479 case 3:
3480 adev->video_timeout = timeout;
3481 break;
3482 default:
3483 break;
3484 }
3485 }
3486 /*
3487 * There is only one value specified and
3488 * it should apply to all non-compute jobs.
3489 */
bcccee89 3490 if (index == 1) {
71f98027 3491 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
bcccee89
ED
3492 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3493 adev->compute_timeout = adev->gfx_timeout;
3494 }
71f98027
AD
3495 }
3496
3497 return ret;
3498}
d4535e2c 3499
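/*
 * Usage sketch (assumption: standard modprobe syntax; values in ms, slot
 * order gfx,compute,sdma,video as parsed above; 0 keeps a slot's default
 * and a negative value disables that timeout entirely):
 *
 *	modprobe amdgpu lockup_timeout=10000,60000,10000,10000
 *	modprobe amdgpu lockup_timeout=5000     (one value: all non-compute jobs)
 *	modprobe amdgpu lockup_timeout=0,-1     (disable only the compute timeout)
 */
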
4a74c38c
PY
3500/**
3501 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3502 *
3503 * @adev: amdgpu_device pointer
3504 *
 3505 * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3506 */
3507static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3508{
3509 struct iommu_domain *domain;
3510
3511 domain = iommu_get_domain_for_dev(adev->dev);
3512 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3513 adev->ram_is_direct_mapped = true;
3514}
3515
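/*
 * Illustrative sketch (not part of the driver): the same query works for
 * any struct device. No domain, or an identity-mapped domain, means DMA
 * addresses equal physical addresses, so the device reaches system RAM
 * directly without IOMMU translation.
 */
static bool example_dev_is_direct_mapped(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	return !domain || domain->type == IOMMU_DOMAIN_IDENTITY;
}
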
77f3a5cd
ND
3516static const struct attribute *amdgpu_dev_attributes[] = {
3517 &dev_attr_product_name.attr,
3518 &dev_attr_product_number.attr,
3519 &dev_attr_serial_number.attr,
3520 &dev_attr_pcie_replay_count.attr,
3521 NULL
3522};
3523
d38ceaf9
AD
3524/**
3525 * amdgpu_device_init - initialize the driver
3526 *
3527 * @adev: amdgpu_device pointer
d38ceaf9
AD
3528 * @flags: driver flags
3529 *
3530 * Initializes the driver info and hw (all asics).
3531 * Returns 0 for success or an error on failure.
3532 * Called at driver startup.
3533 */
3534int amdgpu_device_init(struct amdgpu_device *adev,
d38ceaf9
AD
3535 uint32_t flags)
3536{
8aba21b7
LT
3537 struct drm_device *ddev = adev_to_drm(adev);
3538 struct pci_dev *pdev = adev->pdev;
d38ceaf9 3539 int r, i;
b98c6299 3540 bool px = false;
95844d20 3541 u32 max_MBps;
d38ceaf9
AD
3542
3543 adev->shutdown = false;
d38ceaf9 3544 adev->flags = flags;
4e66d7d2
YZ
3545
3546 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3547 adev->asic_type = amdgpu_force_asic_type;
3548 else
3549 adev->asic_type = flags & AMD_ASIC_MASK;
3550
d38ceaf9 3551 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
593aa2d2 3552 if (amdgpu_emu_mode == 1)
8bdab6bb 3553 adev->usec_timeout *= 10;
770d13b1 3554 adev->gmc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
3555 adev->accel_working = false;
3556 adev->num_rings = 0;
3557 adev->mman.buffer_funcs = NULL;
3558 adev->mman.buffer_funcs_ring = NULL;
3559 adev->vm_manager.vm_pte_funcs = NULL;
0c88b430 3560 adev->vm_manager.vm_pte_num_scheds = 0;
132f34e4 3561 adev->gmc.gmc_funcs = NULL;
7bd939d0 3562 adev->harvest_ip_mask = 0x0;
f54d1867 3563 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 3564 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
3565
3566 adev->smc_rreg = &amdgpu_invalid_rreg;
3567 adev->smc_wreg = &amdgpu_invalid_wreg;
3568 adev->pcie_rreg = &amdgpu_invalid_rreg;
3569 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
3570 adev->pciep_rreg = &amdgpu_invalid_rreg;
3571 adev->pciep_wreg = &amdgpu_invalid_wreg;
4fa1c6a6
TZ
3572 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3573 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
d38ceaf9
AD
3574 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3575 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3576 adev->didt_rreg = &amdgpu_invalid_rreg;
3577 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
3578 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3579 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
3580 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3581 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3582
3e39ab90
AD
3583 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3584 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3585 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
3586
 3587	/* mutex initializations are all done here so we
 3588	 * can call these functions without locking issues */
0e5ca0d1 3589 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
3590 mutex_init(&adev->pm.mutex);
3591 mutex_init(&adev->gfx.gpu_clock_mutex);
3592 mutex_init(&adev->srbm_mutex);
b8866c26 3593 mutex_init(&adev->gfx.pipe_reserve_mutex);
d23ee13f 3594 mutex_init(&adev->gfx.gfx_off_mutex);
d38ceaf9 3595 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 3596 mutex_init(&adev->mn_lock);
e23b74aa 3597 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9 3598 hash_init(adev->mn_hash);
32eaeae0 3599 mutex_init(&adev->psp.mutex);
bd052211 3600 mutex_init(&adev->notifier_lock);
8cda7a4f 3601 mutex_init(&adev->pm.stable_pstate_ctx_lock);
f113cc32 3602 mutex_init(&adev->benchmark_mutex);
d38ceaf9 3603
ab3b9de6 3604 amdgpu_device_init_apu_flags(adev);
9f6a7857 3605
912dfc84
EQ
3606 r = amdgpu_device_check_arguments(adev);
3607 if (r)
3608 return r;
d38ceaf9 3609
d38ceaf9
AD
3610 spin_lock_init(&adev->mmio_idx_lock);
3611 spin_lock_init(&adev->smc_idx_lock);
3612 spin_lock_init(&adev->pcie_idx_lock);
3613 spin_lock_init(&adev->uvd_ctx_idx_lock);
3614 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 3615 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 3616 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 3617 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 3618 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 3619
0c4e7fa5
CZ
3620 INIT_LIST_HEAD(&adev->shadow_list);
3621 mutex_init(&adev->shadow_list_lock);
3622
655ce9cb 3623 INIT_LIST_HEAD(&adev->reset_list);
3624
6492e1b0 3625 INIT_LIST_HEAD(&adev->ras_list);
3626
beff74bc
AD
3627 INIT_DELAYED_WORK(&adev->delayed_init_work,
3628 amdgpu_device_delayed_init_work_handler);
1e317b99
RZ
3629 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3630 amdgpu_device_delay_enable_gfx_off);
2dc80b00 3631
d4535e2c
AG
3632 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3633
d23ee13f 3634 adev->gfx.gfx_off_req_count = 1;
b6e79d9a 3635 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
b1ddf548 3636
b265bdbd
EQ
3637 atomic_set(&adev->throttling_logging_enabled, 1);
3638 /*
3639 * If throttling continues, logging will be performed every minute
3640 * to avoid log flooding. "-1" is subtracted since the thermal
3641 * throttling interrupt comes every second. Thus, the total logging
 3642	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
 3643	 * for the throttling interrupt) = 60 seconds.
3644 */
3645 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3646 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
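	/*
	 * Consumer-side sketch (assumption, not the literal call site): the
	 * thermal interrupt handler then gates its print on this state, e.g.
	 *
	 *	if (__ratelimit(&adev->throttling_logging_rs))
	 *		dev_warn(adev->dev, "thermal throttling detected\n");
	 */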
3647
0fa49558
AX
3648 /* Registers mapping */
3649 /* TODO: block userspace mapping of io register */
da69c161
KW
3650 if (adev->asic_type >= CHIP_BONAIRE) {
3651 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3652 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3653 } else {
3654 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3655 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3656 }
d38ceaf9 3657
6c08e0ef
EQ
3658 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3659 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3660
d38ceaf9
AD
3661 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3662 if (adev->rmmio == NULL) {
3663 return -ENOMEM;
3664 }
3665 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3666 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3667
5494d864
AD
3668 amdgpu_device_get_pcie_info(adev);
3669
b239c017
JX
3670 if (amdgpu_mcbp)
3671 DRM_INFO("MCBP is enabled\n");
3672
928fe236
JX
3673 if (adev->asic_type >= CHIP_NAVI10) {
3674 if (amdgpu_mes || amdgpu_mes_kiq)
3675 adev->enable_mes = true;
3676
3677 if (amdgpu_mes_kiq)
3678 adev->enable_mes_kiq = true;
3679 }
5f84cc63 3680
436afdfa
PY
3681 /*
 3682	 * The reset domain needs to be present early, before the XGMI hive is
 3683	 * discovered (if any) and initialized, so the reset sem and in_gpu_reset
 3684	 * flag can be used early during init and before calling RREG32.
3685 */
3686 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3687 if (!adev->reset_domain)
3688 return -ENOMEM;
3689
3aa0115d
ML
3690 /* detect hw virtualization here */
3691 amdgpu_detect_virtualization(adev);
3692
dffa11b4
ML
3693 r = amdgpu_device_get_job_timeout_settings(adev);
3694 if (r) {
3695 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
4ef87d8f 3696 return r;
a190d1c7
XY
3697 }
3698
d38ceaf9 3699 /* early init functions */
06ec9070 3700 r = amdgpu_device_ip_early_init(adev);
d38ceaf9 3701 if (r)
4ef87d8f 3702 return r;
d38ceaf9 3703
957b0787 3704 amdgpu_gmc_noretry_set(adev);
4a0165f0
VS
3705 /* Need to get xgmi info early to decide the reset behavior*/
3706 if (adev->gmc.xgmi.supported) {
3707 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3708 if (r)
3709 return r;
3710 }
3711
8e6d0b69 3712 /* enable PCIE atomic ops */
3713 if (amdgpu_sriov_vf(adev))
3714 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
e15c9d06 3715 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
8e6d0b69 3716 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3717 else
3718 adev->have_atomics_support =
3719 !pci_enable_atomic_ops_to_root(adev->pdev,
3720 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3721 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3722 if (!adev->have_atomics_support)
 3723		dev_info(adev->dev, "PCIE atomic ops are not supported\n");
3724
6585661d
OZ
 3725	/* doorbell bar mapping and doorbell index init */
3726 amdgpu_device_doorbell_init(adev);
3727
9475a943
SL
3728 if (amdgpu_emu_mode == 1) {
3729 /* post the asic on emulation mode */
3730 emu_soc_asic_init(adev);
bfca0289 3731 goto fence_driver_init;
9475a943 3732 }
bfca0289 3733
04442bf7
LL
3734 amdgpu_reset_init(adev);
3735
4e99a44e
ML
3736 /* detect if we are with an SRIOV vbios */
3737 amdgpu_device_detect_sriov_bios(adev);
048765ad 3738
95e8e59e
AD
3739 /* check if we need to reset the asic
3740 * E.g., driver was not cleanly unloaded previously, etc.
3741 */
f14899fd 3742 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
e3c1b071 3743 if (adev->gmc.xgmi.num_physical_nodes) {
3744 dev_info(adev->dev, "Pending hive reset.\n");
3745 adev->gmc.xgmi.pending_reset = true;
3746 /* Only need to init necessary block for SMU to handle the reset */
3747 for (i = 0; i < adev->num_ip_blocks; i++) {
3748 if (!adev->ip_blocks[i].status.valid)
3749 continue;
3750 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3751 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3752 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3753 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
751f43e7 3754 DRM_DEBUG("IP %s disabled for hw_init.\n",
e3c1b071 3755 adev->ip_blocks[i].version->funcs->name);
3756 adev->ip_blocks[i].status.hw = true;
3757 }
3758 }
3759 } else {
3760 r = amdgpu_asic_reset(adev);
3761 if (r) {
3762 dev_err(adev->dev, "asic reset on init failed\n");
3763 goto failed;
3764 }
95e8e59e
AD
3765 }
3766 }
3767
8f66090b 3768 pci_enable_pcie_error_reporting(adev->pdev);
c9a6b82f 3769
d38ceaf9 3770 /* Post card if necessary */
39c640c0 3771 if (amdgpu_device_need_post(adev)) {
d38ceaf9 3772 if (!adev->bios) {
bec86378 3773 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
3774 r = -EINVAL;
3775 goto failed;
d38ceaf9 3776 }
bec86378 3777 DRM_INFO("GPU posting now...\n");
4d2997ab 3778 r = amdgpu_device_asic_init(adev);
4e99a44e
ML
3779 if (r) {
3780 dev_err(adev->dev, "gpu post error!\n");
3781 goto failed;
3782 }
d38ceaf9
AD
3783 }
3784
88b64e95
AD
3785 if (adev->is_atom_fw) {
3786 /* Initialize clocks */
3787 r = amdgpu_atomfirmware_get_clock_info(adev);
3788 if (r) {
3789 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 3790 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
3791 goto failed;
3792 }
3793 } else {
a5bde2f9
AD
3794 /* Initialize clocks */
3795 r = amdgpu_atombios_get_clock_info(adev);
3796 if (r) {
3797 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 3798 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 3799 goto failed;
a5bde2f9
AD
3800 }
3801 /* init i2c buses */
4562236b
HW
3802 if (!amdgpu_device_has_dc_support(adev))
3803 amdgpu_atombios_i2c_init(adev);
2c1a2784 3804 }
d38ceaf9 3805
bfca0289 3806fence_driver_init:
d38ceaf9 3807 /* Fence driver */
067f44c8 3808 r = amdgpu_fence_driver_sw_init(adev);
2c1a2784 3809 if (r) {
067f44c8 3810 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
e23b74aa 3811 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 3812 goto failed;
2c1a2784 3813 }
d38ceaf9
AD
3814
3815 /* init the mode config */
4a580877 3816 drm_mode_config_init(adev_to_drm(adev));
d38ceaf9 3817
06ec9070 3818 r = amdgpu_device_ip_init(adev);
d38ceaf9 3819 if (r) {
8840a387 3820 /* failed in exclusive mode due to timeout */
3821 if (amdgpu_sriov_vf(adev) &&
3822 !amdgpu_sriov_runtime(adev) &&
3823 amdgpu_virt_mmio_blocked(adev) &&
3824 !amdgpu_virt_wait_reset(adev)) {
3825 dev_err(adev->dev, "VF exclusive mode timeout\n");
1daee8b4
PD
3826 /* Don't send request since VF is inactive. */
3827 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3828 adev->virt.ops = NULL;
8840a387 3829 r = -EAGAIN;
970fd197 3830 goto release_ras_con;
8840a387 3831 }
06ec9070 3832 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
e23b74aa 3833 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
970fd197 3834 goto release_ras_con;
d38ceaf9
AD
3835 }
3836
8d35a259
LG
3837 amdgpu_fence_driver_hw_init(adev);
3838
d69b8971
YZ
3839 dev_info(adev->dev,
3840 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
d7f72fe4
YZ
3841 adev->gfx.config.max_shader_engines,
3842 adev->gfx.config.max_sh_per_se,
3843 adev->gfx.config.max_cu_per_sh,
3844 adev->gfx.cu_info.number);
3845
d38ceaf9
AD
3846 adev->accel_working = true;
3847
e59c0205
AX
3848 amdgpu_vm_check_compute_bug(adev);
3849
95844d20
MO
3850 /* Initialize the buffer migration limit. */
3851 if (amdgpu_moverate >= 0)
3852 max_MBps = amdgpu_moverate;
3853 else
3854 max_MBps = 8; /* Allow 8 MB/s. */
3855 /* Get a log2 for easy divisions. */
3856 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3857
d2f52ac8 3858 r = amdgpu_pm_sysfs_init(adev);
7c868b59
YT
3859 if (r) {
3860 adev->pm_sysfs_en = false;
d2f52ac8 3861 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
7c868b59
YT
3862 } else
3863 adev->pm_sysfs_en = true;
d2f52ac8 3864
5bb23532 3865 r = amdgpu_ucode_sysfs_init(adev);
7c868b59
YT
3866 if (r) {
3867 adev->ucode_sysfs_en = false;
5bb23532 3868 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
7c868b59
YT
3869 } else
3870 adev->ucode_sysfs_en = true;
5bb23532 3871
b0adca4d
EQ
3872 /*
3873 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
 3874	 * Otherwise the mgpu fan boost feature will be skipped because the
 3875	 * gpu instance count comes up short.
3876 */
3877 amdgpu_register_gpu_instance(adev);
3878
d38ceaf9
AD
 3879	/* enable clockgating, etc., after ib tests, since some blocks require
3880 * explicit gating rather than handling it automatically.
3881 */
e3c1b071 3882 if (!adev->gmc.xgmi.pending_reset) {
3883 r = amdgpu_device_ip_late_init(adev);
3884 if (r) {
3885 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3886 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
970fd197 3887 goto release_ras_con;
e3c1b071 3888 }
3889 /* must succeed. */
3890 amdgpu_ras_resume(adev);
3891 queue_delayed_work(system_wq, &adev->delayed_init_work,
3892 msecs_to_jiffies(AMDGPU_RESUME_MS));
2c1a2784 3893 }
d38ceaf9 3894
2c738637
ML
3895 if (amdgpu_sriov_vf(adev))
3896 flush_delayed_work(&adev->delayed_init_work);
3897
77f3a5cd 3898 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
5aea5327 3899 if (r)
77f3a5cd 3900 dev_err(adev->dev, "Could not create amdgpu device attr\n");
bd607166 3901
d155bef0
AB
3902 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3903 r = amdgpu_pmu_init(adev);
9c7c85f7
JK
3904 if (r)
3905 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3906
c1dd4aa6
AG
3907 /* Have stored pci confspace at hand for restore in sudden PCI error */
3908 if (amdgpu_device_cache_pci_state(adev->pdev))
3909 pci_restore_state(pdev);
3910
8c3dd61c
KHF
3911 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3912 /* this will fail for cards that aren't VGA class devices, just
3913 * ignore it */
3914 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
bf44e8ce 3915 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
8c3dd61c
KHF
3916
3917 if (amdgpu_device_supports_px(ddev)) {
3918 px = true;
3919 vga_switcheroo_register_client(adev->pdev,
3920 &amdgpu_switcheroo_ops, px);
3921 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3922 }
3923
e3c1b071 3924 if (adev->gmc.xgmi.pending_reset)
3925 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3926 msecs_to_jiffies(AMDGPU_RESUME_MS));
3927
4a74c38c
PY
3928 amdgpu_device_check_iommu_direct_map(adev);
3929
d38ceaf9 3930 return 0;
83ba126a 3931
970fd197
SY
3932release_ras_con:
3933 amdgpu_release_ras_context(adev);
3934
83ba126a 3935failed:
89041940 3936 amdgpu_vf_error_trans_all(adev);
8840a387 3937
83ba126a 3938 return r;
d38ceaf9
AD
3939}
3940
07775fc1
AG
3941static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3942{
62d5f9f7 3943
07775fc1
AG
3944 /* Clear all CPU mappings pointing to this device */
3945 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3946
3947 /* Unmap all mapped bars - Doorbell, registers and VRAM */
3948 amdgpu_device_doorbell_fini(adev);
3949
3950 iounmap(adev->rmmio);
3951 adev->rmmio = NULL;
3952 if (adev->mman.aper_base_kaddr)
3953 iounmap(adev->mman.aper_base_kaddr);
3954 adev->mman.aper_base_kaddr = NULL;
3955
3956 /* Memory manager related */
3957 if (!adev->gmc.xgmi.connected_to_cpu) {
3958 arch_phys_wc_del(adev->gmc.vram_mtrr);
3959 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3960 }
3961}
3962
d38ceaf9 3963/**
bbe04dec 3964 * amdgpu_device_fini_hw - tear down the driver
d38ceaf9
AD
3965 *
3966 * @adev: amdgpu_device pointer
3967 *
3968 * Tear down the driver info (all asics).
3969 * Called at driver shutdown.
3970 */
72c8c97b 3971void amdgpu_device_fini_hw(struct amdgpu_device *adev)
d38ceaf9 3972{
aac89168 3973 dev_info(adev->dev, "amdgpu: finishing device.\n");
9f875167 3974 flush_delayed_work(&adev->delayed_init_work);
691191a2
YW
3975 if (adev->mman.initialized) {
3976 flush_delayed_work(&adev->mman.bdev.wq);
e78b3197 3977 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
691191a2 3978 }
d0d13fe8 3979 adev->shutdown = true;
9f875167 3980
752c683d
ML
 3981	/* make sure IB tests have finished before entering exclusive mode
 3982	 * to avoid preemption on the IB tests
 3983	 */
519b8b76 3984 if (amdgpu_sriov_vf(adev)) {
752c683d 3985 amdgpu_virt_request_full_gpu(adev, false);
519b8b76
BZ
3986 amdgpu_virt_fini_data_exchange(adev);
3987 }
752c683d 3988
e5b03032
ML
3989 /* disable all interrupts */
3990 amdgpu_irq_disable_all(adev);
ff97cba8 3991	if (adev->mode_info.mode_config_initialized) {
1053b9c9 3992 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4a580877 3993 drm_helper_force_disable_all(adev_to_drm(adev));
ff97cba8 3994 else
4a580877 3995 drm_atomic_helper_shutdown(adev_to_drm(adev));
ff97cba8 3996 }
8d35a259 3997 amdgpu_fence_driver_hw_fini(adev);
72c8c97b 3998
7c868b59
YT
3999 if (adev->pm_sysfs_en)
4000 amdgpu_pm_sysfs_fini(adev);
72c8c97b
AG
4001 if (adev->ucode_sysfs_en)
4002 amdgpu_ucode_sysfs_fini(adev);
4003 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4004
232d1d43
SY
4005 /* disable ras feature must before hw fini */
4006 amdgpu_ras_pre_fini(adev);
4007
e9669fb7 4008 amdgpu_device_ip_fini_early(adev);
d10d0daa 4009
a3848df6
YW
4010 amdgpu_irq_fini_hw(adev);
4011
b6fd6e0f
SK
4012 if (adev->mman.initialized)
4013 ttm_device_clear_dma_mappings(&adev->mman.bdev);
894c6890 4014
d10d0daa 4015 amdgpu_gart_dummy_page_fini(adev);
07775fc1 4016
87172e89
LS
4017 if (drm_dev_is_unplugged(adev_to_drm(adev)))
4018 amdgpu_device_unmap_mmio(adev);
4019
72c8c97b
AG
4020}
4021
4022void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4023{
62d5f9f7
LS
4024 int idx;
4025
8d35a259 4026 amdgpu_fence_driver_sw_fini(adev);
a5c5d8d5 4027 amdgpu_device_ip_fini(adev);
75e1658e
ND
4028 release_firmware(adev->firmware.gpu_info_fw);
4029 adev->firmware.gpu_info_fw = NULL;
d38ceaf9 4030 adev->accel_working = false;
04442bf7
LL
4031
4032 amdgpu_reset_fini(adev);
4033
d38ceaf9 4034 /* free i2c buses */
4562236b
HW
4035 if (!amdgpu_device_has_dc_support(adev))
4036 amdgpu_i2c_fini(adev);
bfca0289
SL
4037
4038 if (amdgpu_emu_mode != 1)
4039 amdgpu_atombios_fini(adev);
4040
d38ceaf9
AD
4041 kfree(adev->bios);
4042 adev->bios = NULL;
b98c6299 4043 if (amdgpu_device_supports_px(adev_to_drm(adev))) {
84c8b22e 4044 vga_switcheroo_unregister_client(adev->pdev);
83ba126a 4045 vga_switcheroo_fini_domain_pm_ops(adev->dev);
b98c6299 4046 }
38d6be81 4047 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
b8779475 4048 vga_client_unregister(adev->pdev);
e9bc1bf7 4049
62d5f9f7
LS
4050 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4051
4052 iounmap(adev->rmmio);
4053 adev->rmmio = NULL;
4054 amdgpu_device_doorbell_fini(adev);
4055 drm_dev_exit(idx);
4056 }
4057
d155bef0
AB
4058 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4059 amdgpu_pmu_fini(adev);
72de33f8 4060 if (adev->mman.discovery_bin)
a190d1c7 4061 amdgpu_discovery_fini(adev);
72c8c97b 4062
cfbb6b00
AG
4063 amdgpu_reset_put_reset_domain(adev->reset_domain);
4064 adev->reset_domain = NULL;
4065
72c8c97b
AG
4066 kfree(adev->pci_state);
4067
d38ceaf9
AD
4068}
4069
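/*
 * Lifecycle sketch (illustrative): teardown is deliberately split in two.
 * amdgpu_device_fini_hw() runs at PCI remove/unplug time and quiesces the
 * hardware; amdgpu_device_fini_sw() runs later, once the last drm_device
 * reference is dropped, and frees the remaining software state:
 *
 *	amdgpu_device_fini_hw(adev);
 *	...
 *	amdgpu_device_fini_sw(adev);
 */
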
58144d28
ND
4070/**
4071 * amdgpu_device_evict_resources - evict device resources
4072 * @adev: amdgpu device object
4073 *
 4074 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4075 * of the vram memory type. Mainly used for evicting device resources
4076 * at suspend time.
4077 *
4078 */
4079static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
4080{
e53d9665
ML
4081 /* No need to evict vram on APUs for suspend to ram or s2idle */
4082 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
58144d28
ND
4083 return;
4084
4085 if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
4086 DRM_WARN("evicting device resources failed\n");
4087
4088}
d38ceaf9
AD
4089
4090/*
4091 * Suspend & resume.
4092 */
4093/**
810ddc3a 4094 * amdgpu_device_suspend - initiate device suspend
d38ceaf9 4095 *
87e3f136 4096 * @dev: drm dev pointer
87e3f136 4097 * @fbcon: notify the fbdev of suspend
d38ceaf9
AD
4098 *
4099 * Puts the hw in the suspend state (all asics).
4100 * Returns 0 for success or an error on failure.
4101 * Called at driver suspend.
4102 */
de185019 4103int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
d38ceaf9 4104{
a2e15b0e 4105 struct amdgpu_device *adev = drm_to_adev(dev);
d38ceaf9 4106
d38ceaf9
AD
4107 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4108 return 0;
4109
44779b43 4110 adev->in_suspend = true;
3fa8f89d
S
4111
4112 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4113 DRM_WARN("smart shift update failed\n");
4114
d38ceaf9
AD
4115 drm_kms_helper_poll_disable(dev);
4116
5f818173 4117 if (fbcon)
087451f3 4118 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
5f818173 4119
beff74bc 4120 cancel_delayed_work_sync(&adev->delayed_init_work);
a5459475 4121
5e6932fe 4122 amdgpu_ras_suspend(adev);
4123
2196927b 4124 amdgpu_device_ip_suspend_phase1(adev);
fe1053b7 4125
5d3a2d95
AD
4126 if (!adev->in_s0ix)
4127 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
94fa5660 4128
58144d28 4129 amdgpu_device_evict_resources(adev);
d38ceaf9 4130
8d35a259 4131 amdgpu_fence_driver_hw_fini(adev);
d38ceaf9 4132
2196927b 4133 amdgpu_device_ip_suspend_phase2(adev);
d38ceaf9 4134
d38ceaf9
AD
4135 return 0;
4136}
4137
4138/**
810ddc3a 4139 * amdgpu_device_resume - initiate device resume
d38ceaf9 4140 *
87e3f136 4141 * @dev: drm dev pointer
87e3f136 4142 * @fbcon: notify the fbdev of resume
d38ceaf9
AD
4143 *
4144 * Bring the hw back to operating state (all asics).
4145 * Returns 0 for success or an error on failure.
4146 * Called at driver resume.
4147 */
de185019 4148int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
d38ceaf9 4149{
1348969a 4150 struct amdgpu_device *adev = drm_to_adev(dev);
03161a6e 4151 int r = 0;
d38ceaf9
AD
4152
4153 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4154 return 0;
4155
62498733 4156 if (adev->in_s0ix)
bc143d8b 4157 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
628c36d7 4158
d38ceaf9 4159 /* post card */
39c640c0 4160 if (amdgpu_device_need_post(adev)) {
4d2997ab 4161 r = amdgpu_device_asic_init(adev);
74b0b157 4162 if (r)
aac89168 4163 dev_err(adev->dev, "amdgpu asic init failed\n");
74b0b157 4164 }
d38ceaf9 4165
06ec9070 4166 r = amdgpu_device_ip_resume(adev);
e6707218 4167 if (r) {
aac89168 4168 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4d3b9ae5 4169 return r;
e6707218 4170 }
8d35a259 4171 amdgpu_fence_driver_hw_init(adev);
5ceb54c6 4172
06ec9070 4173 r = amdgpu_device_ip_late_init(adev);
03161a6e 4174 if (r)
4d3b9ae5 4175 return r;
d38ceaf9 4176
beff74bc
AD
4177 queue_delayed_work(system_wq, &adev->delayed_init_work,
4178 msecs_to_jiffies(AMDGPU_RESUME_MS));
4179
5d3a2d95
AD
4180 if (!adev->in_s0ix) {
4181 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4182 if (r)
4183 return r;
4184 }
756e6880 4185
96a5d8d4 4186 /* Make sure IB tests flushed */
beff74bc 4187 flush_delayed_work(&adev->delayed_init_work);
96a5d8d4 4188
a2e15b0e 4189 if (fbcon)
087451f3 4190 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
d38ceaf9
AD
4191
4192 drm_kms_helper_poll_enable(dev);
23a1a9e5 4193
5e6932fe 4194 amdgpu_ras_resume(adev);
4195
23a1a9e5
L
4196 /*
4197 * Most of the connector probing functions try to acquire runtime pm
4198 * refs to ensure that the GPU is powered on when connector polling is
4199 * performed. Since we're calling this from a runtime PM callback,
4200 * trying to acquire rpm refs will cause us to deadlock.
4201 *
4202 * Since we're guaranteed to be holding the rpm lock, it's safe to
4203 * temporarily disable the rpm helpers so this doesn't deadlock us.
4204 */
4205#ifdef CONFIG_PM
4206 dev->dev->power.disable_depth++;
4207#endif
4562236b
HW
4208 if (!amdgpu_device_has_dc_support(adev))
4209 drm_helper_hpd_irq_event(dev);
4210 else
4211 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
4212#ifdef CONFIG_PM
4213 dev->dev->power.disable_depth--;
4214#endif
44779b43
RZ
4215 adev->in_suspend = false;
4216
3fa8f89d
S
4217 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4218 DRM_WARN("smart shift update failed\n");
4219
4d3b9ae5 4220 return 0;
d38ceaf9
AD
4221}
4222
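/*
 * Ordering sketch (illustrative): suspend and resume above are roughly
 * mirror images of each other:
 *
 *	suspend: display (phase1) -> KFD -> evict VRAM -> fence hw fini -> rest (phase2)
 *	resume:  post card -> ip resume -> fence hw init -> late init -> KFD -> fbcon
 */
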
e3ecdffa
AD
4223/**
4224 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4225 *
4226 * @adev: amdgpu_device pointer
4227 *
4228 * The list of all the hardware IPs that make up the asic is walked and
4229 * the check_soft_reset callbacks are run. check_soft_reset determines
4230 * if the asic is still hung or not.
4231 * Returns true if any of the IPs are still in a hung state, false if not.
4232 */
06ec9070 4233static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
63fbf42f
CZ
4234{
4235 int i;
4236 bool asic_hang = false;
4237
f993d628
ML
4238 if (amdgpu_sriov_vf(adev))
4239 return true;
4240
8bc04c29
AD
4241 if (amdgpu_asic_need_full_reset(adev))
4242 return true;
4243
63fbf42f 4244 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 4245 if (!adev->ip_blocks[i].status.valid)
63fbf42f 4246 continue;
a1255107
AD
4247 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4248 adev->ip_blocks[i].status.hang =
4249 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4250 if (adev->ip_blocks[i].status.hang) {
aac89168 4251 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
4252 asic_hang = true;
4253 }
4254 }
4255 return asic_hang;
4256}
4257
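/*
 * Illustrative sketch (hypothetical IP block): the shape of a
 * check_soft_reset callback. The register and mask names are made up;
 * a real block reads its own status registers.
 */
static bool example_ip_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* hypothetical busy bit */
	return !!(RREG32(mmEXAMPLE_IP_STATUS) & EXAMPLE_IP_STATUS__BUSY_MASK);
}
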
e3ecdffa
AD
4258/**
4259 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4260 *
4261 * @adev: amdgpu_device pointer
4262 *
4263 * The list of all the hardware IPs that make up the asic is walked and the
4264 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4265 * handles any IP specific hardware or software state changes that are
4266 * necessary for a soft reset to succeed.
4267 * Returns 0 on success, negative error code on failure.
4268 */
06ec9070 4269static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
4270{
4271 int i, r = 0;
4272
4273 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 4274 if (!adev->ip_blocks[i].status.valid)
d31a501e 4275 continue;
a1255107
AD
4276 if (adev->ip_blocks[i].status.hang &&
4277 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4278 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
4279 if (r)
4280 return r;
4281 }
4282 }
4283
4284 return 0;
4285}
4286
e3ecdffa
AD
4287/**
4288 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4289 *
4290 * @adev: amdgpu_device pointer
4291 *
4292 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4293 * reset is necessary to recover.
4294 * Returns true if a full asic reset is required, false if not.
4295 */
06ec9070 4296static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
35d782fe 4297{
da146d3b
AD
4298 int i;
4299
8bc04c29
AD
4300 if (amdgpu_asic_need_full_reset(adev))
4301 return true;
4302
da146d3b 4303 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 4304 if (!adev->ip_blocks[i].status.valid)
da146d3b 4305 continue;
a1255107
AD
4306 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4307 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4308 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
4309 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4310 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 4311 if (adev->ip_blocks[i].status.hang) {
aac89168 4312 dev_info(adev->dev, "Some block need full reset!\n");
da146d3b
AD
4313 return true;
4314 }
4315 }
35d782fe
CZ
4316 }
4317 return false;
4318}
4319
e3ecdffa
AD
4320/**
4321 * amdgpu_device_ip_soft_reset - do a soft reset
4322 *
4323 * @adev: amdgpu_device pointer
4324 *
4325 * The list of all the hardware IPs that make up the asic is walked and the
4326 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4327 * IP specific hardware or software state changes that are necessary to soft
4328 * reset the IP.
4329 * Returns 0 on success, negative error code on failure.
4330 */
06ec9070 4331static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
4332{
4333 int i, r = 0;
4334
4335 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 4336 if (!adev->ip_blocks[i].status.valid)
35d782fe 4337 continue;
a1255107
AD
4338 if (adev->ip_blocks[i].status.hang &&
4339 adev->ip_blocks[i].version->funcs->soft_reset) {
4340 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
4341 if (r)
4342 return r;
4343 }
4344 }
4345
4346 return 0;
4347}
4348
e3ecdffa
AD
4349/**
4350 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4351 *
4352 * @adev: amdgpu_device pointer
4353 *
4354 * The list of all the hardware IPs that make up the asic is walked and the
4355 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4356 * handles any IP specific hardware or software state changes that are
4357 * necessary after the IP has been soft reset.
4358 * Returns 0 on success, negative error code on failure.
4359 */
06ec9070 4360static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
4361{
4362 int i, r = 0;
4363
4364 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 4365 if (!adev->ip_blocks[i].status.valid)
35d782fe 4366 continue;
a1255107
AD
4367 if (adev->ip_blocks[i].status.hang &&
4368 adev->ip_blocks[i].version->funcs->post_soft_reset)
4369 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
4370 if (r)
4371 return r;
4372 }
4373
4374 return 0;
4375}
4376
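/*
 * Sequence sketch (illustrative): the soft-reset helpers above are driven
 * in a fixed order by amdgpu_device_pre_asic_reset() further below:
 *
 *	if (!amdgpu_device_ip_need_full_reset(adev)) {
 *		amdgpu_device_ip_pre_soft_reset(adev);
 *		r = amdgpu_device_ip_soft_reset(adev);
 *		amdgpu_device_ip_post_soft_reset(adev);
 *		if (r || amdgpu_device_ip_check_soft_reset(adev))
 *			need_full_reset = true;
 *	}
 *
 * i.e. a failed or still-hung soft reset escalates to a full ASIC reset.
 */
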
e3ecdffa 4377/**
c33adbc7 4378 * amdgpu_device_recover_vram - Recover some VRAM contents
e3ecdffa
AD
4379 *
4380 * @adev: amdgpu_device pointer
4381 *
4382 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4383 * restore things like GPUVM page tables after a GPU reset where
4384 * the contents of VRAM might be lost.
403009bf
CK
4385 *
4386 * Returns:
4387 * 0 on success, negative error code on failure.
e3ecdffa 4388 */
c33adbc7 4389static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
c41d1cf6 4390{
c41d1cf6 4391 struct dma_fence *fence = NULL, *next = NULL;
403009bf 4392 struct amdgpu_bo *shadow;
e18aaea7 4393 struct amdgpu_bo_vm *vmbo;
403009bf 4394 long r = 1, tmo;
c41d1cf6
ML
4395
4396 if (amdgpu_sriov_runtime(adev))
b045d3af 4397 tmo = msecs_to_jiffies(8000);
c41d1cf6
ML
4398 else
4399 tmo = msecs_to_jiffies(100);
4400
aac89168 4401 dev_info(adev->dev, "recover vram bo from shadow start\n");
c41d1cf6 4402 mutex_lock(&adev->shadow_list_lock);
e18aaea7
ND
4403 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4404 shadow = &vmbo->bo;
403009bf 4405 /* No need to recover an evicted BO */
d3116756
CK
4406 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4407 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4408 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
403009bf
CK
4409 continue;
4410
4411 r = amdgpu_bo_restore_shadow(shadow, &next);
4412 if (r)
4413 break;
4414
c41d1cf6 4415 if (fence) {
1712fb1a 4416 tmo = dma_fence_wait_timeout(fence, false, tmo);
403009bf
CK
4417 dma_fence_put(fence);
4418 fence = next;
1712fb1a 4419 if (tmo == 0) {
4420 r = -ETIMEDOUT;
c41d1cf6 4421 break;
1712fb1a 4422 } else if (tmo < 0) {
4423 r = tmo;
4424 break;
4425 }
403009bf
CK
4426 } else {
4427 fence = next;
c41d1cf6 4428 }
c41d1cf6
ML
4429 }
4430 mutex_unlock(&adev->shadow_list_lock);
4431
403009bf
CK
4432 if (fence)
4433 tmo = dma_fence_wait_timeout(fence, false, tmo);
c41d1cf6
ML
4434 dma_fence_put(fence);
4435
1712fb1a 4436 if (r < 0 || tmo <= 0) {
aac89168 4437 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
403009bf
CK
4438 return -EIO;
4439 }
c41d1cf6 4440
aac89168 4441 dev_info(adev->dev, "recover vram bo from shadow done\n");
403009bf 4442 return 0;
c41d1cf6
ML
4443}
4444
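/*
 * Pattern note (illustrative): the loop above pipelines the restores.
 * Copy i+1 is issued before fence i is waited on, so the DMA engine
 * stays busy while the CPU walks the shadow list; only the final fence
 * is waited on after the loop.
 */
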
a90ad3c2 4445
e3ecdffa 4446/**
06ec9070 4447 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5740682e 4448 *
982a820b 4449 * @adev: amdgpu_device pointer
87e3f136 4450 * @from_hypervisor: request from hypervisor
5740682e
ML
4451 *
 4452 * Do a VF FLR and reinitialize the ASIC.
3f48c681 4453 * Returns 0 on success, negative error code on failure.
e3ecdffa
AD
4454 */
4455static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4456 bool from_hypervisor)
5740682e
ML
4457{
4458 int r;
a5f67c93 4459 struct amdgpu_hive_info *hive = NULL;
7258fa31 4460 int retry_limit = 0;
5740682e 4461
7258fa31 4462retry:
992110d7 4463 amdgpu_amdkfd_pre_reset(adev);
4466
5740682e
ML
4467 if (from_hypervisor)
4468 r = amdgpu_virt_request_full_gpu(adev, true);
4469 else
4470 r = amdgpu_virt_reset_gpu(adev);
4471 if (r)
4472 return r;
a90ad3c2
ML
4473
4474 /* Resume IP prior to SMC */
06ec9070 4475 r = amdgpu_device_ip_reinit_early_sriov(adev);
5740682e
ML
4476 if (r)
4477 goto error;
a90ad3c2 4478
c9ffa427 4479 amdgpu_virt_init_data_exchange(adev);
a90ad3c2 4480
7a3e0bb2
RZ
4481 r = amdgpu_device_fw_loading(adev);
4482 if (r)
4483 return r;
4484
a90ad3c2 4485 /* now we are okay to resume SMC/CP/SDMA */
06ec9070 4486 r = amdgpu_device_ip_reinit_late_sriov(adev);
5740682e
ML
4487 if (r)
4488 goto error;
a90ad3c2 4489
a5f67c93
ZL
4490 hive = amdgpu_get_xgmi_hive(adev);
4491 /* Update PSP FW topology after reset */
4492 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4493 r = amdgpu_xgmi_update_topology(hive, adev);
4494
4495 if (hive)
4496 amdgpu_put_xgmi_hive(hive);
4497
4498 if (!r) {
4499 amdgpu_irq_gpu_reset_resume_helper(adev);
4500 r = amdgpu_ib_ring_tests(adev);
4501 amdgpu_amdkfd_post_reset(adev);
4502 }
a90ad3c2 4503
abc34253 4504error:
c41d1cf6 4505 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
e3526257 4506 amdgpu_inc_vram_lost(adev);
c33adbc7 4507 r = amdgpu_device_recover_vram(adev);
a90ad3c2 4508 }
437f3e0b 4509 amdgpu_virt_release_full_gpu(adev, true);
a90ad3c2 4510
7258fa31
SK
4511 if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4512 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4513 retry_limit++;
4514 goto retry;
4515 } else
4516 DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4517 }
4518
a90ad3c2
ML
4519 return r;
4520}
4521
9a1cddd6 4522/**
4523 * amdgpu_device_has_job_running - check if there is any job in mirror list
4524 *
982a820b 4525 * @adev: amdgpu_device pointer
9a1cddd6 4526 *
4527 * check if there is any job in mirror list
4528 */
4529bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4530{
4531 int i;
4532 struct drm_sched_job *job;
4533
4534 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4535 struct amdgpu_ring *ring = adev->rings[i];
4536
4537 if (!ring || !ring->sched.thread)
4538 continue;
4539
4540 spin_lock(&ring->sched.job_list_lock);
6efa4b46
LT
4541 job = list_first_entry_or_null(&ring->sched.pending_list,
4542 struct drm_sched_job, list);
9a1cddd6 4543 spin_unlock(&ring->sched.job_list_lock);
4544 if (job)
4545 return true;
4546 }
4547 return false;
4548}
4549
12938fad
CK
4550/**
4551 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4552 *
982a820b 4553 * @adev: amdgpu_device pointer
12938fad
CK
4554 *
4555 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4556 * a hung GPU.
4557 */
4558bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4559{
4560 if (!amdgpu_device_ip_check_soft_reset(adev)) {
aac89168 4561 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
12938fad
CK
4562 return false;
4563 }
4564
3ba7b418
AG
4565 if (amdgpu_gpu_recovery == 0)
4566 goto disabled;
4567
4568 if (amdgpu_sriov_vf(adev))
4569 return true;
4570
4571 if (amdgpu_gpu_recovery == -1) {
4572 switch (adev->asic_type) {
b3523c45
AD
4573#ifdef CONFIG_DRM_AMDGPU_SI
4574 case CHIP_VERDE:
4575 case CHIP_TAHITI:
4576 case CHIP_PITCAIRN:
4577 case CHIP_OLAND:
4578 case CHIP_HAINAN:
4579#endif
4580#ifdef CONFIG_DRM_AMDGPU_CIK
4581 case CHIP_KAVERI:
4582 case CHIP_KABINI:
4583 case CHIP_MULLINS:
4584#endif
4585 case CHIP_CARRIZO:
4586 case CHIP_STONEY:
4587 case CHIP_CYAN_SKILLFISH:
3ba7b418 4588 goto disabled;
b3523c45
AD
4589 default:
4590 break;
3ba7b418 4591 }
12938fad
CK
4592 }
4593
4594 return true;
3ba7b418
AG
4595
4596disabled:
aac89168 4597 dev_info(adev->dev, "GPU recovery disabled.\n");
3ba7b418 4598 return false;
12938fad
CK
4599}
4600
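/*
 * Usage sketch (assumption: standard module-parameter semantics matching
 * the logic above): gpu_recovery=-1 keeps the per-ASIC default, 0 never
 * recovers, 1 always tries:
 *
 *	modprobe amdgpu gpu_recovery=1
 */
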
5c03e584
FX
4601int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4602{
4603 u32 i;
4604 int ret = 0;
4605
4606 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4607
4608 dev_info(adev->dev, "GPU mode1 reset\n");
4609
4610 /* disable BM */
4611 pci_clear_master(adev->pdev);
4612
4613 amdgpu_device_cache_pci_state(adev->pdev);
4614
4615 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4616 dev_info(adev->dev, "GPU smu mode1 reset\n");
4617 ret = amdgpu_dpm_mode1_reset(adev);
4618 } else {
4619 dev_info(adev->dev, "GPU psp mode1 reset\n");
4620 ret = psp_gpu_reset(adev);
4621 }
4622
4623 if (ret)
4624 dev_err(adev->dev, "GPU mode1 reset failed\n");
4625
4626 amdgpu_device_load_pci_state(adev->pdev);
4627
4628 /* wait for asic to come out of reset */
4629 for (i = 0; i < adev->usec_timeout; i++) {
4630 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4631
4632 if (memsize != 0xffffffff)
4633 break;
4634 udelay(1);
4635 }
4636
4637 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4638 return ret;
4639}
5c6dd71e 4640
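/*
 * Pattern note (illustrative): while the ASIC is still in reset, MMIO
 * reads return 0xffffffff, so the loop above polls a register with a
 * known-sane value (memsize) until it reads back something else or
 * adev->usec_timeout expires.
 */
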
e3c1b071 4641int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
04442bf7 4642 struct amdgpu_reset_context *reset_context)
26bc5340 4643{
5c1e6fa4 4644 int i, r = 0;
04442bf7
LL
4645 struct amdgpu_job *job = NULL;
4646 bool need_full_reset =
4647 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4648
4649 if (reset_context->reset_req_dev == adev)
4650 job = reset_context->job;
71182665 4651
b602ca5f
TZ
4652 if (amdgpu_sriov_vf(adev)) {
4653 /* stop the data exchange thread */
4654 amdgpu_virt_fini_data_exchange(adev);
4655 }
4656
71182665 4657 /* block all schedulers and reset given job's ring */
0875dc9e
CZ
4658 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4659 struct amdgpu_ring *ring = adev->rings[i];
4660
51687759 4661 if (!ring || !ring->sched.thread)
0875dc9e 4662 continue;
5740682e 4663
c530b02f
JZ
 4664		/* clear job fences from fence drv to avoid force_completion
 4665		 * leaving NULL and vm flush fences in fence drv */
5c1e6fa4 4666 amdgpu_fence_driver_clear_job_fences(ring);
c530b02f 4667
2f9d4084
ML
4668 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4669 amdgpu_fence_driver_force_completion(ring);
0875dc9e 4670 }
d38ceaf9 4671
ff99849b 4672 if (job && job->vm)
222b5f04
AG
4673 drm_sched_increase_karma(&job->base);
4674
04442bf7 4675 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
404b277b
LL
4676 /* If reset handler not implemented, continue; otherwise return */
4677 if (r == -ENOSYS)
4678 r = 0;
4679 else
04442bf7
LL
4680 return r;
4681
1d721ed6 4682 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
26bc5340
AG
4683 if (!amdgpu_sriov_vf(adev)) {
4684
4685 if (!need_full_reset)
4686 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4687
4688 if (!need_full_reset) {
4689 amdgpu_device_ip_pre_soft_reset(adev);
4690 r = amdgpu_device_ip_soft_reset(adev);
4691 amdgpu_device_ip_post_soft_reset(adev);
4692 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
aac89168 4693 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
26bc5340
AG
4694 need_full_reset = true;
4695 }
4696 }
4697
4698 if (need_full_reset)
4699 r = amdgpu_device_ip_suspend(adev);
04442bf7
LL
4700 if (need_full_reset)
4701 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4702 else
4703 clear_bit(AMDGPU_NEED_FULL_RESET,
4704 &reset_context->flags);
26bc5340
AG
4705 }
4706
4707 return r;
4708}
4709
15fd09a0
SA
4710static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4711{
4712 uint32_t reg_value;
4713 int i;
4714
38a15ad9 4715 lockdep_assert_held(&adev->reset_domain->sem);
15fd09a0
SA
4716 dump_stack();
4717
4718 for (i = 0; i < adev->num_regs; i++) {
4719 reg_value = RREG32(adev->reset_dump_reg_list[i]);
4720 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], reg_value);
4721 }
4722
4723 return 0;
4724}
4725
04442bf7
LL
4726int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4727 struct amdgpu_reset_context *reset_context)
26bc5340
AG
4728{
4729 struct amdgpu_device *tmp_adev = NULL;
04442bf7 4730 bool need_full_reset, skip_hw_reset, vram_lost = false;
26bc5340
AG
4731 int r = 0;
4732
04442bf7
LL
4733 /* Try reset handler method first */
4734 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4735 reset_list);
15fd09a0 4736 amdgpu_reset_reg_dumps(tmp_adev);
04442bf7 4737 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
404b277b
LL
4738 /* If reset handler not implemented, continue; otherwise return */
4739 if (r == -ENOSYS)
4740 r = 0;
4741 else
04442bf7
LL
4742 return r;
4743
4744 /* Reset handler not implemented, use the default method */
4745 need_full_reset =
4746 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4747 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4748
26bc5340 4749 /*
655ce9cb 4750 * ASIC reset has to be done on all XGMI hive nodes ASAP
26bc5340
AG
4751 * to allow proper links negotiation in FW (within 1 sec)
4752 */
7ac71382 4753 if (!skip_hw_reset && need_full_reset) {
655ce9cb 4754 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
041a62bc 4755 /* For XGMI run all resets in parallel to speed up the process */
d4535e2c 4756 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
e3c1b071 4757 tmp_adev->gmc.xgmi.pending_reset = false;
c96cf282 4758 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
d4535e2c
AG
4759 r = -EALREADY;
4760 } else
4761 r = amdgpu_asic_reset(tmp_adev);
d4535e2c 4762
041a62bc 4763 if (r) {
aac89168 4764 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4a580877 4765 r, adev_to_drm(tmp_adev)->unique);
041a62bc 4766 break;
ce316fa5
LM
4767 }
4768 }
4769
041a62bc
AG
4770 /* For XGMI wait for all resets to complete before proceed */
4771 if (!r) {
655ce9cb 4772 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
ce316fa5
LM
4773 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4774 flush_work(&tmp_adev->xgmi_reset_work);
4775 r = tmp_adev->asic_reset_res;
4776 if (r)
4777 break;
ce316fa5
LM
4778 }
4779 }
4780 }
ce316fa5 4781 }
26bc5340 4782
43c4d576 4783 if (!r && amdgpu_ras_intr_triggered()) {
655ce9cb 4784 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5e67bba3 4785 if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4786 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4787 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
43c4d576
JC
4788 }
4789
00eaa571 4790 amdgpu_ras_intr_cleared();
43c4d576 4791 }
00eaa571 4792
655ce9cb 4793 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
26bc5340
AG
4794 if (need_full_reset) {
4795 /* post card */
e3c1b071 4796 r = amdgpu_device_asic_init(tmp_adev);
4797 if (r) {
aac89168 4798 dev_warn(tmp_adev->dev, "asic atom init failed!");
e3c1b071 4799 } else {
26bc5340 4800 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
9cec53c1
JZ
4801 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4802 if (r)
4803 goto out;
4804
26bc5340
AG
4805 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4806 if (r)
4807 goto out;
4808
4809 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4810 if (vram_lost) {
77e7f829 4811 DRM_INFO("VRAM is lost due to GPU reset!\n");
e3526257 4812 amdgpu_inc_vram_lost(tmp_adev);
26bc5340
AG
4813 }
4814
26bc5340
AG
4815 r = amdgpu_device_fw_loading(tmp_adev);
4816 if (r)
4817 return r;
4818
4819 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4820 if (r)
4821 goto out;
4822
4823 if (vram_lost)
4824 amdgpu_device_fill_reset_magic(tmp_adev);
4825
fdafb359
EQ
4826 /*
4827 * Add this ASIC as tracked as reset was already
4828 * complete successfully.
4829 */
4830 amdgpu_register_gpu_instance(tmp_adev);
4831
04442bf7
LL
4832 if (!reset_context->hive &&
4833 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
e3c1b071 4834 amdgpu_xgmi_add_device(tmp_adev);
4835
7c04ca50 4836 r = amdgpu_device_ip_late_init(tmp_adev);
4837 if (r)
4838 goto out;
4839
087451f3 4840 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
565d1941 4841
e8fbaf03
GC
4842 /*
 4844				 * faulty pages flagged by ECC reaches the threshold,
 4845				 * and ras recovery is scheduled next. So add one check
 4846				 * here to break recovery if it indeed exceeds the
 4847				 * bad page threshold, and remind the user to
 4848				 * retire this GPU or set a bigger
 4849				 * bad_page_threshold value to fix this when
 4850				 * probing the driver again.
4851 */
11003c68 4852 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
e8fbaf03
GC
4853 /* must succeed. */
4854 amdgpu_ras_resume(tmp_adev);
4855 } else {
4856 r = -EINVAL;
4857 goto out;
4858 }
e79a04d5 4859
26bc5340 4860 /* Update PSP FW topology after reset */
04442bf7
LL
4861 if (reset_context->hive &&
4862 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4863 r = amdgpu_xgmi_update_topology(
4864 reset_context->hive, tmp_adev);
26bc5340
AG
4865 }
4866 }
4867
26bc5340
AG
4868out:
4869 if (!r) {
4870 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4871 r = amdgpu_ib_ring_tests(tmp_adev);
4872 if (r) {
4873 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
26bc5340
AG
4874 need_full_reset = true;
4875 r = -EAGAIN;
4876 goto end;
4877 }
4878 }
4879
4880 if (!r)
4881 r = amdgpu_device_recover_vram(tmp_adev);
4882 else
4883 tmp_adev->asic_reset_res = r;
4884 }
4885
4886end:
04442bf7
LL
4887 if (need_full_reset)
4888 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4889 else
4890 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
26bc5340
AG
4891 return r;
4892}
4893
e923be99 4894static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
26bc5340 4895{
5740682e 4896
a3a09142
AD
4897 switch (amdgpu_asic_reset_method(adev)) {
4898 case AMD_RESET_METHOD_MODE1:
4899 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4900 break;
4901 case AMD_RESET_METHOD_MODE2:
4902 adev->mp1_state = PP_MP1_STATE_RESET;
4903 break;
4904 default:
4905 adev->mp1_state = PP_MP1_STATE_NONE;
4906 break;
4907 }
26bc5340 4908}
d38ceaf9 4909
e923be99 4910static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
26bc5340 4911{
89041940 4912 amdgpu_vf_error_trans_all(adev);
a3a09142 4913 adev->mp1_state = PP_MP1_STATE_NONE;
91fb309d
HC
4914}
4915
3f12acc8
EQ
4916static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4917{
4918 struct pci_dev *p = NULL;
4919
4920 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4921 adev->pdev->bus->number, 1);
4922 if (p) {
4923 pm_runtime_enable(&(p->dev));
4924 pm_runtime_resume(&(p->dev));
4925 }
4926}
4927
4928static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4929{
4930 enum amd_reset_method reset_method;
4931 struct pci_dev *p = NULL;
4932 u64 expires;
4933
4934 /*
4935 * For now, only BACO and mode1 reset are confirmed
 4936	 * to suffer the audio issue if not properly suspended.
4937 */
4938 reset_method = amdgpu_asic_reset_method(adev);
4939 if ((reset_method != AMD_RESET_METHOD_BACO) &&
4940 (reset_method != AMD_RESET_METHOD_MODE1))
4941 return -EINVAL;
4942
4943 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4944 adev->pdev->bus->number, 1);
4945 if (!p)
4946 return -ENODEV;
4947
4948 expires = pm_runtime_autosuspend_expiration(&(p->dev));
4949 if (!expires)
4950 /*
4951 * If we cannot get the audio device autosuspend delay,
4952 * a fixed 4S interval will be used. Considering 3S is
4953 * the audio controller default autosuspend delay setting.
4954 * 4S used here is guaranteed to cover that.
4955 */
54b7feb9 4956 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
3f12acc8
EQ
4957
4958 while (!pm_runtime_status_suspended(&(p->dev))) {
4959 if (!pm_runtime_suspend(&(p->dev)))
4960 break;
4961
4962 if (expires < ktime_get_mono_fast_ns()) {
4963 dev_warn(adev->dev, "failed to suspend display audio\n");
4964 /* TODO: abort the succeeding gpu reset? */
4965 return -ETIMEDOUT;
4966 }
4967 }
4968
4969 pm_runtime_disable(&(p->dev));
4970
4971 return 0;
4972}
4973
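/*
 * Pairing sketch (illustrative): devfn 1 on the GPU's bus/slot is the HDA
 * audio function sharing the GPU's power domain; the two helpers above
 * bracket a reset, as the recovery path below does:
 *
 *	audio_suspended = !amdgpu_device_suspend_display_audio(adev);
 *	... perform the GPU reset ...
 *	if (audio_suspended)
 *		amdgpu_device_resume_display_audio(adev);
 */
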
9d8d96be 4974static void amdgpu_device_recheck_guilty_jobs(
04442bf7
LL
4975 struct amdgpu_device *adev, struct list_head *device_list_handle,
4976 struct amdgpu_reset_context *reset_context)
e6c6338f
JZ
4977{
4978 int i, r = 0;
4979
4980 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4981 struct amdgpu_ring *ring = adev->rings[i];
4982 int ret = 0;
4983 struct drm_sched_job *s_job;
4984
4985 if (!ring || !ring->sched.thread)
4986 continue;
4987
4988 s_job = list_first_entry_or_null(&ring->sched.pending_list,
4989 struct drm_sched_job, list);
4990 if (s_job == NULL)
4991 continue;
4992
 4993		/* clear the job's guilty flag and let the following step decide the real one */
4994 drm_sched_reset_karma(s_job);
38d4e463
JC
 4995		/* the real bad job will be resubmitted twice, so take a dma_fence_get
 4996		 * here to keep the fence refcount balanced */
4997 dma_fence_get(s_job->s_fence->parent);
e6c6338f
JZ
4998 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
4999
5000 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
5001 if (ret == 0) { /* timeout */
5002 DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
5003 ring->sched.name, s_job->id);
5004
5005 /* set guilty */
5006 drm_sched_increase_karma(s_job);
5007retry:
5008 /* do hw reset */
5009 if (amdgpu_sriov_vf(adev)) {
5010 amdgpu_virt_fini_data_exchange(adev);
5011 r = amdgpu_device_reset_sriov(adev, false);
5012 if (r)
5013 adev->asic_reset_res = r;
5014 } else {
04442bf7
LL
5015 clear_bit(AMDGPU_SKIP_HW_RESET,
5016 &reset_context->flags);
5017 r = amdgpu_do_asic_reset(device_list_handle,
5018 reset_context);
e6c6338f
JZ
5019 if (r && r == -EAGAIN)
5020 goto retry;
5021 }
5022
5023 /*
 5024			 * bump the reset counter so that the following
 5025			 * resubmitted job can flush its vmid
5026 */
5027 atomic_inc(&adev->gpu_reset_counter);
5028 continue;
5029 }
5030
5031 /* got the hw fence, signal finished fence */
5032 atomic_dec(ring->sched.score);
38d4e463 5033 dma_fence_put(s_job->s_fence->parent);
e6c6338f
JZ
5034 dma_fence_get(&s_job->s_fence->finished);
5035 dma_fence_signal(&s_job->s_fence->finished);
5036 dma_fence_put(&s_job->s_fence->finished);
5037
5038 /* remove node from list and free the job */
5039 spin_lock(&ring->sched.job_list_lock);
5040 list_del_init(&s_job->list);
5041 spin_unlock(&ring->sched.job_list_lock);
5042 ring->sched.ops->free_job(s_job);
5043 }
5044}
5045
26bc5340 5046/**
c7703ce3 5047 * amdgpu_device_gpu_recover_imp - reset the asic and recover scheduler
26bc5340 5048 *
982a820b 5049 * @adev: amdgpu_device pointer
26bc5340
AG
5050 * @job: which job trigger hang
5051 *
5052 * Attempt to reset the GPU if it has hung (all asics).
5053 * Attempt to do soft-reset or full-reset and reinitialize Asic
5054 * Returns 0 for success or an error on failure.
5055 */
5056
54f329cc 5057int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
26bc5340
AG
5058 struct amdgpu_job *job)
5059{
1d721ed6 5060 struct list_head device_list, *device_list_handle = NULL;
7dd8c205 5061 bool job_signaled = false;
26bc5340 5062 struct amdgpu_hive_info *hive = NULL;
26bc5340 5063 struct amdgpu_device *tmp_adev = NULL;
1d721ed6 5064 int i, r = 0;
bb5c7235 5065 bool need_emergency_restart = false;
3f12acc8 5066 bool audio_suspended = false;
e6c6338f 5067 int tmp_vram_lost_counter;
04442bf7
LL
5068 struct amdgpu_reset_context reset_context;
5069
5070 memset(&reset_context, 0, sizeof(reset_context));
26bc5340 5071
6e3cd2a9 5072 /*
bb5c7235
WS
5073 * Special case: RAS triggered and full reset isn't supported
5074 */
5075 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5076
d5ea093e
AG
5077 /*
5078 * Flush RAM to disk so that after reboot
5079	 * the user can read the log and see why the system rebooted.
5080 */
bb5c7235 5081 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
d5ea093e
AG
5082 DRM_WARN("Emergency reboot.");
5083
5084 ksys_sync_helper();
5085 emergency_restart();
5086 }
5087
b823821f 5088 dev_info(adev->dev, "GPU %s begin!\n",
bb5c7235 5089 need_emergency_restart ? "jobs stop":"reset");
26bc5340 5090
175ac6ec
ZL
5091 if (!amdgpu_sriov_vf(adev))
5092 hive = amdgpu_get_xgmi_hive(adev);
681260df 5093 if (hive)
53b3f8f4 5094 mutex_lock(&hive->hive_lock);
26bc5340 5095
04442bf7
LL
5096 reset_context.method = AMD_RESET_METHOD_NONE;
5097 reset_context.reset_req_dev = adev;
5098 reset_context.job = job;
5099 reset_context.hive = hive;
5100 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5101
9e94d22c
EQ
5102 /*
5103 * Build list of devices to reset.
5104 * In case we are in XGMI hive mode, resort the device list
5105 * to put adev in the 1st position.
5106 */
5107 INIT_LIST_HEAD(&device_list);
175ac6ec 5108 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
655ce9cb 5109 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
5110 list_add_tail(&tmp_adev->reset_list, &device_list);
5111 if (!list_is_first(&adev->reset_list, &device_list))
5112 list_rotate_to_front(&adev->reset_list, &device_list);
5113 device_list_handle = &device_list;
26bc5340 5114 } else {
655ce9cb 5115 list_add_tail(&adev->reset_list, &device_list);
26bc5340
AG
5116 device_list_handle = &device_list;
5117 }
5118
e923be99
AG
5119 /* We need to lock reset domain only once both for XGMI and single device */
5120 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5121 reset_list);
3675c2f2 5122 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
e923be99 5123
1d721ed6 5124 /* block all schedulers and reset given job's ring */
655ce9cb 5125 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
f287a3c5 5126
e923be99 5127 amdgpu_device_set_mp1_state(tmp_adev);
f287a3c5 5128
3f12acc8
EQ
5129 /*
5130 * Try to put the audio codec into suspend state
5131		 * before the gpu reset starts.
5132		 *
5133		 * The power domain of the graphics device is
5134		 * shared with the AZ power domain. Without this,
5135		 * we may change the audio hardware from behind
5136		 * the audio driver's back. That would trigger
5137		 * some audio codec errors.
5138 */
5139 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5140 audio_suspended = true;
5141
9e94d22c
EQ
5142 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5143
52fb44cf
EQ
5144 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5145
428890a3 5146 if (!amdgpu_sriov_vf(tmp_adev))
5147 amdgpu_amdkfd_pre_reset(tmp_adev);
9e94d22c 5148
12ffa55d
AG
5149 /*
5150		 * Mark these ASICs to be reset as untracked first
5151		 * and add them back after the reset completes
5152 */
5153 amdgpu_unregister_gpu_instance(tmp_adev);
5154
087451f3 5155		drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
565d1941 5156
f1c1314b 5157 /* disable ras on ALL IPs */
bb5c7235 5158 if (!need_emergency_restart &&
b823821f 5159 amdgpu_device_ip_need_full_reset(tmp_adev))
f1c1314b 5160 amdgpu_ras_suspend(tmp_adev);
5161
1d721ed6
AG
5162 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5163 struct amdgpu_ring *ring = tmp_adev->rings[i];
5164
5165 if (!ring || !ring->sched.thread)
5166 continue;
5167
0b2d2c2e 5168 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
7c6e68c7 5169
bb5c7235 5170 if (need_emergency_restart)
7c6e68c7 5171 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
1d721ed6 5172 }
8f8c80f4 5173 atomic_inc(&tmp_adev->gpu_reset_counter);
1d721ed6
AG
5174 }
5175
bb5c7235 5176 if (need_emergency_restart)
7c6e68c7
AG
5177 goto skip_sched_resume;
5178
1d721ed6
AG
5179 /*
5180 * Must check guilty signal here since after this point all old
5181 * HW fences are force signaled.
5182 *
5183 * job->base holds a reference to parent fence
5184 */
5185 if (job && job->base.s_fence->parent &&
7dd8c205 5186 dma_fence_is_signaled(job->base.s_fence->parent)) {
1d721ed6 5187 job_signaled = true;
1d721ed6
AG
5188 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5189 goto skip_hw_reset;
5190 }
5191
26bc5340 5192retry: /* Rest of adevs pre asic reset from XGMI hive. */
655ce9cb 5193 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
04442bf7 5194 r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
26bc5340
AG
5195		/* TODO: should we stop? */
5196 if (r) {
aac89168 5197 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
4a580877 5198 r, adev_to_drm(tmp_adev)->unique);
26bc5340
AG
5199 tmp_adev->asic_reset_res = r;
5200 }
5201 }
5202
e6c6338f 5203 tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
26bc5340 5204 /* Actual ASIC resets if needed.*/
4f30d920 5205 /* Host driver will handle XGMI hive reset for SRIOV */
26bc5340
AG
5206 if (amdgpu_sriov_vf(adev)) {
5207 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5208 if (r)
5209 adev->asic_reset_res = r;
5210 } else {
04442bf7 5211 r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
26bc5340
AG
5212 if (r && r == -EAGAIN)
5213 goto retry;
5214 }
5215
1d721ed6
AG
5216skip_hw_reset:
5217
26bc5340 5218 /* Post ASIC reset for all devs .*/
655ce9cb 5219 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
7c6e68c7 5220
e6c6338f
JZ
5221 /*
5222		 * Sometimes a later bad compute job can block a good gfx job because
5223		 * the gfx and compute rings share internal GC hardware. We add an
5224		 * additional guilty-job recheck step to find the real guilty job: it
5225		 * synchronously resubmits and waits for the first pending job to signal.
5226		 * If the wait times out, we identify that job as the real guilty one.
5227 */
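		/* gpu_recovery == 2 is the "advanced TDR mode" of the
		 * amdgpu.gpu_recovery module parameter (an assumption based on
		 * the parameter's description); the recheck is also skipped
		 * when VRAM was lost during the reset.
		 */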
5228 if (amdgpu_gpu_recovery == 2 &&
5229 !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
04442bf7
LL
5230 amdgpu_device_recheck_guilty_jobs(
5231 tmp_adev, device_list_handle, &reset_context);
e6c6338f 5232
1d721ed6
AG
5233 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5234 struct amdgpu_ring *ring = tmp_adev->rings[i];
5235
5236 if (!ring || !ring->sched.thread)
5237 continue;
5238
5239			/* No point in resubmitting jobs if we didn't do a HW reset */
5240 if (!tmp_adev->asic_reset_res && !job_signaled)
5241 drm_sched_resubmit_jobs(&ring->sched);
5242
5243 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5244 }
5245
1053b9c9 5246 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
4a580877 5247 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
1d721ed6
AG
5248 }
5249
7258fa31
SK
5250 if (tmp_adev->asic_reset_res)
5251 r = tmp_adev->asic_reset_res;
5252
1d721ed6 5253 tmp_adev->asic_reset_res = 0;
26bc5340
AG
5254
5255 if (r) {
5256 /* bad news, how to tell it to userspace ? */
12ffa55d 5257 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
26bc5340
AG
5258 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5259 } else {
12ffa55d 5260 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
3fa8f89d
S
5261 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5262 DRM_WARN("smart shift update failed\n");
26bc5340 5263 }
7c6e68c7 5264 }
26bc5340 5265
7c6e68c7 5266skip_sched_resume:
655ce9cb 5267 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
428890a3 5268 /* unlock kfd: SRIOV would do it separately */
5269 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5270 amdgpu_amdkfd_post_reset(tmp_adev);
8e2712e7 5271
5272		/* kfd_post_reset will do nothing if the kfd device is not initialized;
5273		 * bring up kfd here if it was not initialized before
5274 */
5275		if (!tmp_adev->kfd.init_complete)
5276			amdgpu_amdkfd_device_init(tmp_adev);
5277
3f12acc8
EQ
5278 if (audio_suspended)
5279 amdgpu_device_resume_display_audio(tmp_adev);
e923be99
AG
5280
5281 amdgpu_device_unset_mp1_state(tmp_adev);
26bc5340
AG
5282 }
5283
e923be99
AG
5284 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5285 reset_list);
5286 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5287
9e94d22c 5288 if (hive) {
9e94d22c 5289 mutex_unlock(&hive->hive_lock);
d95e8e97 5290 amdgpu_put_xgmi_hive(hive);
9e94d22c 5291 }
26bc5340 5292
f287a3c5 5293 if (r)
26bc5340 5294 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
d38ceaf9
AD
5295 return r;
5296}
5297
54f329cc
AG
5298struct amdgpu_recover_work_struct {
5299 struct work_struct base;
5300 struct amdgpu_device *adev;
5301 struct amdgpu_job *job;
5302 int ret;
5303};
5304
5305static void amdgpu_device_queue_gpu_recover_work(struct work_struct *work)
5306{
5307 struct amdgpu_recover_work_struct *recover_work = container_of(work, struct amdgpu_recover_work_struct, base);
5308
5309 recover_work->ret = amdgpu_device_gpu_recover_imp(recover_work->adev, recover_work->job);
5310}
5311/*
5312 * Serialize gpu recovery into the reset domain's single-threaded workqueue
5313 */
5314int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5315 struct amdgpu_job *job)
5316{
5317 struct amdgpu_recover_work_struct work = {.adev = adev, .job = job};
5318
5319 INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);
5320
cfbb6b00 5321 if (!amdgpu_reset_domain_schedule(adev->reset_domain, &work.base))
54f329cc
AG
5322 return -EAGAIN;
5323
5324 flush_work(&work.base);
5325
5326 return work.ret;
5327}
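
/*
 * Illustrative usage (a sketch, not part of this file): a ring timeout
 * handler, e.g. amdgpu_job_timedout() in amdgpu_job.c, would typically
 * invoke recovery along these lines:
 *
 *	r = amdgpu_device_gpu_recover(ring->adev, job);
 *	if (r)
 *		dev_err(adev->dev, "GPU recovery failed: %d\n", r);
 *
 * The call blocks in flush_work() until the serialized recovery work has
 * run to completion on the reset domain's workqueue.
 */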
5328
e3ecdffa
AD
5329/**
5330 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5331 *
5332 * @adev: amdgpu_device pointer
5333 *
5334 * Fetches and stores in the driver the PCIE capabilities (gen speed
5335 * and lanes) of the slot the device is in. Handles APUs and
5336 * virtualized environments where PCIE config space may not be available.
5337 */
5494d864 5338static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
d0dd7f0c 5339{
5d9a6330 5340 struct pci_dev *pdev;
c5313457
HK
5341 enum pci_bus_speed speed_cap, platform_speed_cap;
5342 enum pcie_link_width platform_link_width;
d0dd7f0c 5343
cd474ba0
AD
5344 if (amdgpu_pcie_gen_cap)
5345 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 5346
cd474ba0
AD
5347 if (amdgpu_pcie_lane_cap)
5348 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 5349
cd474ba0
AD
5350 /* covers APUs as well */
5351 if (pci_is_root_bus(adev->pdev->bus)) {
5352 if (adev->pm.pcie_gen_mask == 0)
5353 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5354 if (adev->pm.pcie_mlw_mask == 0)
5355 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 5356 return;
cd474ba0 5357 }
d0dd7f0c 5358
c5313457
HK
5359 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5360 return;
5361
dbaa922b
AD
5362 pcie_bandwidth_available(adev->pdev, NULL,
5363 &platform_speed_cap, &platform_link_width);
c5313457 5364
cd474ba0 5365 if (adev->pm.pcie_gen_mask == 0) {
5d9a6330
AD
5366 /* asic caps */
5367 pdev = adev->pdev;
5368 speed_cap = pcie_get_speed_cap(pdev);
5369 if (speed_cap == PCI_SPEED_UNKNOWN) {
5370 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
cd474ba0
AD
5371 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5372 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
cd474ba0 5373 } else {
2b3a1f51
FX
5374 if (speed_cap == PCIE_SPEED_32_0GT)
5375 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5376 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5377 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5378 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5379 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5380 else if (speed_cap == PCIE_SPEED_16_0GT)
5d9a6330
AD
5381 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5382 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5383 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5384 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5385 else if (speed_cap == PCIE_SPEED_8_0GT)
5386 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5387 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5388 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5389 else if (speed_cap == PCIE_SPEED_5_0GT)
5390 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5391 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5392 else
5393 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5394 }
5395 /* platform caps */
c5313457 5396 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5d9a6330
AD
5397 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5398 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5399 } else {
2b3a1f51
FX
5400 if (platform_speed_cap == PCIE_SPEED_32_0GT)
5401 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5402 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5403 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5404 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5405 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5406 else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5d9a6330
AD
5407 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5408 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5409 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5410 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
c5313457 5411 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5d9a6330
AD
5412 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5413 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5414 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
c5313457 5415 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5d9a6330
AD
5416 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5417 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5418 else
5419 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5420
cd474ba0
AD
5421 }
5422 }
5423 if (adev->pm.pcie_mlw_mask == 0) {
c5313457 5424 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5d9a6330
AD
5425 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5426 } else {
c5313457 5427 switch (platform_link_width) {
5d9a6330 5428 case PCIE_LNK_X32:
cd474ba0
AD
5429 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5430 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5431 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5432 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5433 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5434 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5435 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5436 break;
5d9a6330 5437 case PCIE_LNK_X16:
cd474ba0
AD
5438 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5439 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5440 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5441 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5442 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5443 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5444 break;
5d9a6330 5445 case PCIE_LNK_X12:
cd474ba0
AD
5446 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5447 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5448 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5449 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5450 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5451 break;
5d9a6330 5452 case PCIE_LNK_X8:
cd474ba0
AD
5453 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5454 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5455 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5456 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5457 break;
5d9a6330 5458 case PCIE_LNK_X4:
cd474ba0
AD
5459 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5460 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5461 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5462 break;
5d9a6330 5463 case PCIE_LNK_X2:
cd474ba0
AD
5464 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5465 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5466 break;
5d9a6330 5467 case PCIE_LNK_X1:
cd474ba0
AD
5468 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5469 break;
5470 default:
5471 break;
5472 }
d0dd7f0c
AD
5473 }
5474 }
5475}
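
/*
 * Illustrative note (an assumption, not from this file): power-management
 * code typically consumes these masks by testing for the highest set
 * CAIL_* link speed/width bit, e.g.:
 *
 *	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
 *		max_gen = 4;	// max_gen is a hypothetical local
 */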
d38ceaf9 5476
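/*
 * BACO ("Bus Active, Chip Off") powers the GPU core down while keeping the
 * PCIe bus interface alive. When RAS is enabled, the NBIO doorbell
 * interrupt is disabled across BACO entry and re-enabled on exit,
 * presumably so stray doorbell interrupts cannot fire while the chip is
 * powered off.
 */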
361dbd01
AD
5477int amdgpu_device_baco_enter(struct drm_device *dev)
5478{
1348969a 5479 struct amdgpu_device *adev = drm_to_adev(dev);
7a22677b 5480 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
361dbd01 5481
4a580877 5482 if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
361dbd01
AD
5483 return -ENOTSUPP;
5484
8ab0d6f0 5485 if (ras && adev->ras_enabled &&
acdae216 5486 adev->nbio.funcs->enable_doorbell_interrupt)
7a22677b
LM
5487 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5488
9530273e 5489 return amdgpu_dpm_baco_enter(adev);
361dbd01
AD
5490}
5491
5492int amdgpu_device_baco_exit(struct drm_device *dev)
5493{
1348969a 5494 struct amdgpu_device *adev = drm_to_adev(dev);
7a22677b 5495 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
9530273e 5496 int ret = 0;
361dbd01 5497
4a580877 5498 if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
361dbd01
AD
5499 return -ENOTSUPP;
5500
9530273e
EQ
5501 ret = amdgpu_dpm_baco_exit(adev);
5502 if (ret)
5503 return ret;
7a22677b 5504
8ab0d6f0 5505 if (ras && adev->ras_enabled &&
acdae216 5506 adev->nbio.funcs->enable_doorbell_interrupt)
7a22677b
LM
5507 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5508
1bece222
CL
5509 if (amdgpu_passthrough(adev) &&
5510 adev->nbio.funcs->clear_doorbell_interrupt)
5511 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5512
7a22677b 5513 return 0;
361dbd01 5514}
c9a6b82f
AG
5515
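/*
 * PCI error recovery flow (see Documentation/PCI/pci-error-recovery.rst):
 * the PCI core calls error_detected() first; CAN_RECOVER leads to
 * mmio_enabled(), NEED_RESET leads to slot_reset(), and resume() runs once
 * recovery is deemed successful.
 */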
5516/**
5517 * amdgpu_pci_error_detected - Called when a PCI error is detected.
5518 * @pdev: PCI device struct
5519 * @state: PCI channel state
5520 *
5521 * Description: Called when a PCI error is detected.
5522 *
5523 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5524 */
5525pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5526{
5527 struct drm_device *dev = pci_get_drvdata(pdev);
5528 struct amdgpu_device *adev = drm_to_adev(dev);
acd89fca 5529 int i;
c9a6b82f
AG
5530
5531 DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5532
6894305c
AG
5533 if (adev->gmc.xgmi.num_physical_nodes > 1) {
5534 DRM_WARN("No support for XGMI hive yet...");
5535 return PCI_ERS_RESULT_DISCONNECT;
5536 }
5537
e17e27f9
GC
5538 adev->pci_channel_state = state;
5539
c9a6b82f
AG
5540 switch (state) {
5541 case pci_channel_io_normal:
5542 return PCI_ERS_RESULT_CAN_RECOVER;
acd89fca 5543 /* Fatal error, prepare for slot reset */
8a11d283
TZ
5544 case pci_channel_io_frozen:
5545 /*
d0fb18b5 5546 * Locking adev->reset_domain->sem will prevent any external access
acd89fca
AG
5547 * to GPU during PCI error recovery
5548 */
3675c2f2 5549 amdgpu_device_lock_reset_domain(adev->reset_domain);
e923be99 5550 amdgpu_device_set_mp1_state(adev);
acd89fca
AG
5551
5552 /*
5553 * Block any work scheduling as we do for regular GPU reset
5554 * for the duration of the recovery
5555 */
5556 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5557 struct amdgpu_ring *ring = adev->rings[i];
5558
5559 if (!ring || !ring->sched.thread)
5560 continue;
5561
5562 drm_sched_stop(&ring->sched, NULL);
5563 }
8f8c80f4 5564 atomic_inc(&adev->gpu_reset_counter);
c9a6b82f
AG
5565 return PCI_ERS_RESULT_NEED_RESET;
5566 case pci_channel_io_perm_failure:
5567 /* Permanent error, prepare for device removal */
5568 return PCI_ERS_RESULT_DISCONNECT;
5569 }
5570
5571 return PCI_ERS_RESULT_NEED_RESET;
5572}
5573
5574/**
5575 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5576 * @pdev: pointer to PCI device
5577 */
5578pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5579{
5580
5581 DRM_INFO("PCI error: mmio enabled callback!!\n");
5582
5583 /* TODO - dump whatever for debugging purposes */
5584
5585	/* This is called only if amdgpu_pci_error_detected returns
5586 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5587 * works, no need to reset slot.
5588 */
5589
5590 return PCI_ERS_RESULT_RECOVERED;
5591}
5592
5593/**
5594 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5595 * @pdev: PCI device struct
5596 *
5597 * Description: This routine is called by the pci error recovery
5598 * code after the PCI slot has been reset, just before we
5599 * should resume normal operations.
5600 */
5601pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5602{
5603 struct drm_device *dev = pci_get_drvdata(pdev);
5604 struct amdgpu_device *adev = drm_to_adev(dev);
362c7b91 5605 int r, i;
04442bf7 5606 struct amdgpu_reset_context reset_context;
362c7b91 5607 u32 memsize;
7ac71382 5608 struct list_head device_list;
c9a6b82f
AG
5609
5610 DRM_INFO("PCI error: slot reset callback!!\n");
5611
04442bf7
LL
5612 memset(&reset_context, 0, sizeof(reset_context));
5613
7ac71382 5614 INIT_LIST_HEAD(&device_list);
655ce9cb 5615 list_add_tail(&adev->reset_list, &device_list);
7ac71382 5616
362c7b91
AG
5617 /* wait for asic to come out of reset */
5618 msleep(500);
5619
7ac71382 5620 /* Restore PCI confspace */
c1dd4aa6 5621 amdgpu_device_load_pci_state(pdev);
c9a6b82f 5622
362c7b91
AG
5623 /* confirm ASIC came out of reset */
5624 for (i = 0; i < adev->usec_timeout; i++) {
5625 memsize = amdgpu_asic_get_config_memsize(adev);
5626
5627 if (memsize != 0xffffffff)
5628 break;
5629 udelay(1);
5630 }
5631 if (memsize == 0xffffffff) {
5632 r = -ETIME;
5633 goto out;
5634 }
5635
04442bf7
LL
5636 reset_context.method = AMD_RESET_METHOD_NONE;
5637 reset_context.reset_req_dev = adev;
5638 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5639 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5640
7afefb81 5641 adev->no_hw_access = true;
04442bf7 5642 r = amdgpu_device_pre_asic_reset(adev, &reset_context);
7afefb81 5643 adev->no_hw_access = false;
c9a6b82f
AG
5644 if (r)
5645 goto out;
5646
04442bf7 5647 r = amdgpu_do_asic_reset(&device_list, &reset_context);
c9a6b82f
AG
5648
5649out:
c9a6b82f 5650 if (!r) {
c1dd4aa6
AG
5651 if (amdgpu_device_cache_pci_state(adev->pdev))
5652 pci_restore_state(adev->pdev);
5653
c9a6b82f
AG
5654 DRM_INFO("PCIe error recovery succeeded\n");
5655 } else {
5656 DRM_ERROR("PCIe error recovery failed, err:%d", r);
e923be99
AG
5657 amdgpu_device_unset_mp1_state(adev);
5658 amdgpu_device_unlock_reset_domain(adev->reset_domain);
c9a6b82f
AG
5659 }
5660
5661 return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5662}
5663
5664/**
5665 * amdgpu_pci_resume() - resume normal ops after PCI reset
5666 * @pdev: pointer to PCI device
5667 *
5668 * Called when the error recovery driver tells us that it's
505199a3 5669 * OK to resume normal operation.
c9a6b82f
AG
5670 */
5671void amdgpu_pci_resume(struct pci_dev *pdev)
5672{
5673 struct drm_device *dev = pci_get_drvdata(pdev);
5674 struct amdgpu_device *adev = drm_to_adev(dev);
acd89fca 5675 int i;
c9a6b82f 5676
c9a6b82f
AG
5677
5678 DRM_INFO("PCI error: resume callback!!\n");
acd89fca 5679
e17e27f9
GC
5680 /* Only continue execution for the case of pci_channel_io_frozen */
5681 if (adev->pci_channel_state != pci_channel_io_frozen)
5682 return;
5683
acd89fca
AG
5684 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5685 struct amdgpu_ring *ring = adev->rings[i];
5686
5687 if (!ring || !ring->sched.thread)
5688 continue;
5689
5690
5691 drm_sched_resubmit_jobs(&ring->sched);
5692 drm_sched_start(&ring->sched, true);
5693 }
5694
e923be99
AG
5695 amdgpu_device_unset_mp1_state(adev);
5696 amdgpu_device_unlock_reset_domain(adev->reset_domain);
c9a6b82f 5697}
c1dd4aa6
AG
5698
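/*
 * Cache/restore helpers for the device's PCI config space: the state
 * cached here is loaded back by amdgpu_device_load_pci_state() during
 * slot reset, before the ASIC reset path reinitializes the chip.
 */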
5699bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5700{
5701 struct drm_device *dev = pci_get_drvdata(pdev);
5702 struct amdgpu_device *adev = drm_to_adev(dev);
5703 int r;
5704
5705 r = pci_save_state(pdev);
5706 if (!r) {
5707 kfree(adev->pci_state);
5708
5709 adev->pci_state = pci_store_saved_state(pdev);
5710
5711 if (!adev->pci_state) {
5712 DRM_ERROR("Failed to store PCI saved state");
5713 return false;
5714 }
5715 } else {
5716 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5717 return false;
5718 }
5719
5720 return true;
5721}
5722
5723bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5724{
5725 struct drm_device *dev = pci_get_drvdata(pdev);
5726 struct amdgpu_device *adev = drm_to_adev(dev);
5727 int r;
5728
5729 if (!adev->pci_state)
5730 return false;
5731
5732 r = pci_load_saved_state(pdev, adev->pci_state);
5733
5734 if (!r) {
5735 pci_restore_state(pdev);
5736 } else {
5737 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5738 return false;
5739 }
5740
5741 return true;
5742}
5743
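/*
 * HDP (Host Data Path) ordering helpers. A flush makes CPU writes that
 * went through the HDP aperture visible to the GPU; an invalidate drops
 * stale data from the HDP read path. Both are no-ops where memory is
 * already coherent: bare-metal APUs and ASICs whose GMC is connected to
 * the CPU (the early returns below).
 */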
810085dd
EH
5744void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5745 struct amdgpu_ring *ring)
5746{
5747#ifdef CONFIG_X86_64
b818a5d3 5748 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
810085dd
EH
5749 return;
5750#endif
5751 if (adev->gmc.xgmi.connected_to_cpu)
5752 return;
5753
5754 if (ring && ring->funcs->emit_hdp_flush)
5755 amdgpu_ring_emit_hdp_flush(ring);
5756 else
5757 amdgpu_asic_flush_hdp(adev, ring);
5758}
c1dd4aa6 5759
810085dd
EH
5760void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5761 struct amdgpu_ring *ring)
5762{
5763#ifdef CONFIG_X86_64
b818a5d3 5764 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
810085dd
EH
5765 return;
5766#endif
5767 if (adev->gmc.xgmi.connected_to_cpu)
5768 return;
c1dd4aa6 5769
810085dd
EH
5770 amdgpu_asic_invalidate_hdp(adev, ring);
5771}
34f3a4a9 5772
89a7a870
AG
5773int amdgpu_in_reset(struct amdgpu_device *adev)
5774{
5775 return atomic_read(&adev->reset_domain->in_gpu_reset);
5776}
5777
34f3a4a9
LY
5778/**
5779 * amdgpu_device_halt() - bring hardware to some kind of halt state
5780 *
5781 * @adev: amdgpu_device pointer
5782 *
5783 * Bring hardware to some kind of halt state so that no one can touch it
5784 * any more. It helps to maintain the error context when an error occurs.
5785 * Compared to a simple hang, the system will stay stable at least for SSH
5786 * access. Then it should be trivial to inspect the hardware state and
5787 * see what's going on. Implemented as following:
5788 *
5789 * 1. drm_dev_unplug() makes device inaccessible to user space (IOCTLs, etc),
5790 * clears all CPU mappings to device, disallows remappings through page faults
5791 * 2. amdgpu_irq_disable_all() disables all interrupts
5792 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5793 * 4. set adev->no_hw_access to avoid potential crashes after step 5
5794 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5795 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5796 * flush any in flight DMA operations
5797 */
5798void amdgpu_device_halt(struct amdgpu_device *adev)
5799{
5800 struct pci_dev *pdev = adev->pdev;
e0f943b4 5801 struct drm_device *ddev = adev_to_drm(adev);
34f3a4a9
LY
5802
5803 drm_dev_unplug(ddev);
5804
5805 amdgpu_irq_disable_all(adev);
5806
5807 amdgpu_fence_driver_hw_fini(adev);
5808
5809 adev->no_hw_access = true;
5810
5811 amdgpu_device_unmap_mmio(adev);
5812
5813 pci_disable_device(pdev);
5814 pci_wait_for_pending_transaction(pdev);
5815}
86700a40
XD
5816
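/*
 * Indirect access to PCIE port registers through an index/data register
 * pair provided by the NBIO block. The pair is shared, so accesses are
 * serialized with pcie_idx_lock; the (void)RREG32(address) read-back
 * presumably posts the index write before the data register is touched.
 */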
5817u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5818 u32 reg)
5819{
5820 unsigned long flags, address, data;
5821 u32 r;
5822
5823 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5824 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5825
5826 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5827 WREG32(address, reg * 4);
5828 (void)RREG32(address);
5829 r = RREG32(data);
5830 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5831 return r;
5832}
5833
5834void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5835 u32 reg, u32 v)
5836{
5837 unsigned long flags, address, data;
5838
5839 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5840 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5841
5842 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5843 WREG32(address, reg * 4);
5844 (void)RREG32(address);
5845 WREG32(data, v);
5846 (void)RREG32(data);
5847 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5848}