drm/amdgpu: revert "Adjust removal control flow for smu v13_0_2"
[linux-2.6-block.git] drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#include <linux/apple-gmux.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/device.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
#include "amdgpu_virt.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
#endif

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000
#define AMDGPU_MAX_RETRY_LIMIT		2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
#define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2)
#define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
#define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)
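
/* The fallback offsets above are byte addresses; ">> 2" turns them into the
 * dword-indexed register offsets used by the indirect access helpers below.
 */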

static const struct drm_driver amdgpu_kms_driver;

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"ALDEBARAN",
	"NAVI10",
	"CYAN_SKILLFISH",
	"NAVI14",
	"NAVI12",
	"SIENNA_CICHLID",
	"NAVY_FLOUNDER",
	"VANGOGH",
	"DIMGREY_CAVEFISH",
	"BEIGE_GOBY",
	"YELLOW_CARP",
	"IP DISCOVERY",
	"LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as the sum of the NAKs generated and NAKs received.
 */
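
/*
 * Usage sketch (the path assumes the first DRM card; the index may differ):
 *
 *   $ cat /sys/class/drm/card0/device/pcie_replay_count
 *   0
 */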

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, 0444,
		   amdgpu_device_get_pcie_replay_count, NULL);

static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
					  struct bin_attribute *attr, char *buf,
					  loff_t ppos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t bytes_read;

	switch (ppos) {
	case AMDGPU_SYS_REG_STATE_XGMI:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_WAFL:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_PCIE:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_USR:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_USR_1:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
		break;
	default:
		return -EINVAL;
	}

	return bytes_read;
}
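
/*
 * Note that @ppos is used as a selector rather than a byte offset here:
 * userspace seeks to one of the AMDGPU_SYS_REG_STATE_* values to pick
 * which register-state dump the subsequent read returns.
 */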

BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
	 AMDGPU_SYS_REG_STATE_END);

int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (!amdgpu_asic_get_reg_state_supported(adev))
		return 0;

	ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);

	return ret;
}

void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_asic_get_reg_state_supported(adev))
		return;
	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
}

/**
 * DOC: board_info
 *
 * The amdgpu driver provides a sysfs API for giving board related information.
 * It provides the form factor information in the format
 *
 *   type : form factor
 *
 * Possible form factor values
 *
 * - "cem"     - PCIE CEM card
 * - "oam"     - Open Compute Accelerator Module
 * - "unknown" - Not known
 *
 */
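
/*
 * Usage sketch (assuming card0; an OAM board would report):
 *
 *   $ cat /sys/class/drm/card0/device/board_info
 *   type : oam
 */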

static ssize_t amdgpu_device_get_board_info(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM;
	const char *pkg;

	if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
		pkg_type = adev->smuio.funcs->get_pkg_type(adev);

	switch (pkg_type) {
	case AMDGPU_PKG_TYPE_CEM:
		pkg = "cem";
		break;
	case AMDGPU_PKG_TYPE_OAM:
		pkg = "oam";
		break;
	default:
		pkg = "unknown";
		break;
	}

	return sysfs_emit(buf, "%s : %s\n", "type", pkg);
}

static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL);

static struct attribute *amdgpu_board_attrs[] = {
	&dev_attr_board_info.attr,
	NULL,
};

static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj,
					     struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (adev->flags & AMD_IS_APU)
		return 0;

	return attr->mode;
}

static const struct attribute_group amdgpu_board_attrs_group = {
	.attrs = amdgpu_board_attrs,
	.is_visible = amdgpu_board_attrs_is_visible
};

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise return false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device dGPU with
 * smart shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
	return (amdgpu_device_supports_boco(dev) &&
		amdgpu_acpi_is_power_shift_control_supported());
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; sizeof(@buf) must be >= @size
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}

	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}
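
/*
 * Minimal usage sketch for the helper above (illustrative values only):
 * read one dword from VRAM offset 0 into a stack variable.
 *
 *   uint32_t val;
 *
 *   amdgpu_device_mm_access(adev, 0, &val, sizeof(val), false);
 */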

/**
 * amdgpu_device_aper_access - access vram by vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; sizeof(@buf) must be >= @size
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			/* Make sure HDP write cache flush happens without any reordering
			 * after the system memory contents are sent over PCIe device
			 */
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			/* Make sure HDP read cache is invalidated before issuing a read
			 * to the PCIe device
			 */
			mb();
			memcpy_fromio(buf, addr, count);
		}

	}

	return count;
#else
	return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; sizeof(@buf) must be >= @size
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try to use the vram aperture to access vram first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* use MM to access the rest of vram */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}
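
/*
 * Usage sketch (illustrative; "vram_offset" is a caller-supplied,
 * dword-aligned offset): copy a 4 KiB page out of VRAM. The helper uses
 * the CPU-visible aperture where possible and falls back to
 * MM_INDEX/MM_DATA for the remainder.
 *
 *   u8 page[4096];
 *
 *   amdgpu_device_vram_access(adev, vram_offset, page, sizeof(page), false);
 */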

/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_domain->sem))
			up_read(&adev->reset_domain->sem);
		else
			lockdep_assert_held(&adev->reset_domain->sem);
	}
#endif
	return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg, 0);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}
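
/*
 * Callers normally reach this helper through the RREG32()/WREG32() family
 * of macros in amdgpu.h rather than invoking it directly (an assumption
 * worth checking against the macro definitions for the exact expansion).
 */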

/*
 * MMIO register read with bytes helper functions
 * @offset: bytes offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}


/**
 * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 * @xcc_id: xcc accelerated compute core id
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
				uint32_t reg, uint32_t acc_flags,
				uint32_t xcc_id)
{
	uint32_t ret, rlcg_flag;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    adev->gfx.rlc.rlcg_reg_access_supported &&
		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
							 GC_HWIP, false,
							 &rlcg_flag)) {
			ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, xcc_id);
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
			   amdgpu_sriov_runtime(adev) &&
			   down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	return ret;
}

/*
 * MMIO register write with bytes helper functions
 * @offset: bytes offset from MMIO start
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v, 0);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

/**
 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 * @xcc_id: xcc accelerated compute core id
 *
 * This function is invoked only for the debugfs register access
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v,
			     uint32_t xcc_id)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
	} else if ((reg * 4) >= adev->rmmio_size) {
		adev->pcie_wreg(adev, reg * 4, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}

/**
 * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 * @xcc_id: xcc accelerated compute core id
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t v,
			    uint32_t acc_flags, uint32_t xcc_id)
{
	uint32_t rlcg_flag;

	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    adev->gfx.rlc.rlcg_reg_access_supported &&
		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
							 GC_HWIP, true,
							 &rlcg_flag)) {
			amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, xcc_id);
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
			   amdgpu_sriov_runtime(adev) &&
			   down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v, xcc_id);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}
}

/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	u32 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}
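
/*
 * The helper above is the classic index/data pair protocol: write the target
 * address to the PCIE index register, read it back to flush the posted
 * write, then access the data register. The *_ext and 64-bit variants below
 * elaborate on the same sequence.
 */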

u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
				    u64 reg_addr)
{
	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	if (unlikely(!adev->nbio.funcs)) {
		pcie_index = AMDGPU_PCIE_INDEX_FALLBACK;
		pcie_data = AMDGPU_PCIE_DATA_FALLBACK;
	} else {
		pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
		pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	}

	if (reg_addr >> 32) {
		if (unlikely(!adev->nbio.funcs))
			pcie_index_hi = AMDGPU_PCIE_INDEX_HI_FALLBACK;
		else
			pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
	} else {
		pcie_index_hi = 0;
	}

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r = readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	u64 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
				      u64 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	unsigned long pcie_index_hi = 0;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;
	u64 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r |= ((u64)readl(pcie_data_offset) << 32);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register address
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
				     u64 reg_addr, u32 reg_data)
{
	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
	else
		pcie_index_hi = 0;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
				       u64 reg_addr, u64 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	unsigned long pcie_index_hi = 0;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_get_rev_id - query device rev_id
 *
 * @adev: amdgpu_device pointer
 *
 * Return device rev_id
 */
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	int ret;

	amdgpu_asic_pre_asic_init(adev);

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		amdgpu_psp_wait_for_bootloader(adev);
		ret = amdgpu_atomfirmware_asic_init(adev, true);
		return ret;
	} else {
		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
	}

	return 0;
}

/**
 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &adev->mem_scratch.robj,
				       &adev->mem_scratch.gpu_addr,
				       (void **)&adev->mem_scratch.ptr);
}

/**
 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with and/or masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
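
/*
 * Illustrative (made-up) golden-register table in the {reg, and_mask,
 * or_mask} triplet layout the helper expects; real tables live in the
 * per-ASIC files.
 *
 *   static const u32 fake_golden_settings[] = {
 *           0x1234, 0xffffffff, 0x00000002,  // and_mask == ~0: write or_mask as-is
 *           0x5678, 0x0000ff00, 0x00001100,  // read-modify-write of masked bits
 *   };
 *
 *   amdgpu_device_program_register_sequence(adev, fake_golden_settings,
 *                                           ARRAY_SIZE(fake_golden_settings));
 */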

/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}
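
/*
 * Each writeback slot is 256 bits wide (8 dwords), hence the "<< 3" above:
 * slot n starts at dword offset n * 8, e.g. slot 2 maps to dword offset 16.
 */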

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	unsigned int i;
	u16 cmd;
	int r;

	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		return 0;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
	if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
		DRM_WARN("System can't access extended configuration space, please check!!\n");

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
			rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}
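
/*
 * Worked example for the rebar size computation above: 8 GiB of VRAM is
 * 2^33 bytes, and pci_rebar_bytes_to_size() encodes sizes as
 * log2(bytes) - 20, giving a requested encoding of 13 before it is clamped
 * to what the device actually supports.
 */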

static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
{
	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
		return false;

	return true;
}

/*
 * GPU helpers function.
 */
/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if post is needed or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (!amdgpu_device_read_bios(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still need driver do vPost otherwise gpu hang, while
		 * those smc fw version above 22.15 doesn't have this flaw, so we force
		 * vpost executed for smc version below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			release_firmware(adev->pm.fw);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset whole hive on init */
	if (adev->gmc.xgmi.pending_reset)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on DCE 3.0 or later.
 * If users report that it works on older ASICS as well, we may
 * loosen this.
 */
bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
{
	switch (amdgpu_seamless) {
	case -1:
		break;
	case 1:
		return true;
	case 0:
		return false;
	default:
		DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
			  amdgpu_seamless);
		return false;
	}

	if (!(adev->flags & AMD_IS_APU))
		return false;

	if (adev->mman.keep_stolen_vga_memory)
		return false;

	return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
}

/*
 * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
 * don't support dynamic speed switching. Until we have confirmation from Intel
 * that a specific host supports it, it's safer that we keep it disabled for all.
 *
 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
 */
static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
{
#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* eGPU change speeds based on USB4 fabric conditions */
	if (dev_is_removable(adev->dev))
		return true;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		return false;
#endif
	return true;
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
 * @adev: amdgpu_device pointer
 *
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 *
 * Returns true if it should be used or false if not.
 */
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
	switch (amdgpu_aspm) {
	case -1:
		break;
	case 0:
		return false;
	case 1:
		return true;
	default:
		return false;
	}
	if (adev->flags & AMD_IS_APU)
		return false;
	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
		return false;
	return pcie_aspm_enabled(adev->pdev);
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
						 bool state)
{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory
	 */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}
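
/*
 * Worked example: with amdgpu_vm_block_size == 9, a leaf page table resolves
 * 9 bits on top of the 12-bit page offset, so each page table spans
 * 2^(9+12) bytes = 2 MiB of GPU virtual address space.
 */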
1721
e3ecdffa
AD
1722/**
1723 * amdgpu_device_check_vm_size - validate the vm size
1724 *
1725 * @adev: amdgpu_device pointer
1726 *
1727 * Validates the vm size in GB specified via module parameter.
1728 * The VM size is the size of the GPU virtual memory space in GB.
1729 */
06ec9070 1730static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
83ca145d 1731{
64dab074
AD
1732 /* no need to check the default value */
1733 if (amdgpu_vm_size == -1)
1734 return;
1735
83ca145d
ZJ
1736 if (amdgpu_vm_size < 1) {
1737 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1738 amdgpu_vm_size);
f3368128 1739 amdgpu_vm_size = -1;
83ca145d 1740 }
83ca145d
ZJ
1741}
1742
7951e376
RZ
1743static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1744{
1745 struct sysinfo si;
a9d4fe2f 1746 bool is_os_64 = (sizeof(void *) == 8);
7951e376
RZ
1747 uint64_t total_memory;
1748 uint64_t dram_size_seven_GB = 0x1B8000000;
1749 uint64_t dram_size_three_GB = 0xB8000000;
1750
1751 if (amdgpu_smu_memory_pool_size == 0)
1752 return;
1753
1754 if (!is_os_64) {
1755 DRM_WARN("Not 64-bit OS, feature not supported\n");
1756 goto def_value;
1757 }
1758 si_meminfo(&si);
1759 total_memory = (uint64_t)si.totalram * si.mem_unit;
1760
1761 if ((amdgpu_smu_memory_pool_size == 1) ||
1762 (amdgpu_smu_memory_pool_size == 2)) {
1763 if (total_memory < dram_size_three_GB)
1764 goto def_value1;
1765 } else if ((amdgpu_smu_memory_pool_size == 4) ||
1766 (amdgpu_smu_memory_pool_size == 8)) {
1767 if (total_memory < dram_size_seven_GB)
1768 goto def_value1;
1769 } else {
1770 DRM_WARN("Smu memory pool size not supported\n");
1771 goto def_value;
1772 }
1773 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1774
1775 return;
1776
1777def_value1:
1778 DRM_WARN("No enough system memory\n");
1779def_value:
1780 adev->pm.smu_prv_buffer_size = 0;
1781}
1782
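/*
 * Worked example for the sizing above: the module parameter selects the pool
 * size and the reserved buffer is (size << 28) bytes, i.e. size * 256MB:
 *   amdgpu_smu_memory_pool_size = 1 or 2 -> 256MB/512MB, needs ~3GB of RAM
 *   amdgpu_smu_memory_pool_size = 4 or 8 -> 1GB/2GB, needs ~7GB of RAM
 * (the RAM thresholds are the dram_size_*_GB constants defined above).
 */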
9f6a7857
HR
1783static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1784{
1785 if (!(adev->flags & AMD_IS_APU) ||
1786 adev->asic_type < CHIP_RAVEN)
1787 return 0;
1788
1789 switch (adev->asic_type) {
1790 case CHIP_RAVEN:
1791 if (adev->pdev->device == 0x15dd)
1792 adev->apu_flags |= AMD_APU_IS_RAVEN;
1793 if (adev->pdev->device == 0x15d8)
1794 adev->apu_flags |= AMD_APU_IS_PICASSO;
1795 break;
1796 case CHIP_RENOIR:
1797 if ((adev->pdev->device == 0x1636) ||
1798 (adev->pdev->device == 0x164c))
1799 adev->apu_flags |= AMD_APU_IS_RENOIR;
1800 else
1801 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1802 break;
1803 case CHIP_VANGOGH:
1804 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1805 break;
1806 case CHIP_YELLOW_CARP:
1807 break;
d0f56dc2 1808 case CHIP_CYAN_SKILLFISH:
dfcc3e8c
AD
1809 if ((adev->pdev->device == 0x13FE) ||
1810 (adev->pdev->device == 0x143F))
d0f56dc2
TZ
1811 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1812 break;
9f6a7857 1813 default:
4eaf21b7 1814 break;
9f6a7857
HR
1815 }
1816
1817 return 0;
1818}
1819
d38ceaf9 1820/**
06ec9070 1821 * amdgpu_device_check_arguments - validate module params
d38ceaf9
AD
1822 *
1823 * @adev: amdgpu_device pointer
1824 *
1825 * Validates certain module parameters and updates
1826 * the associated values used by the driver (all asics).
1827 */
912dfc84 1828static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
d38ceaf9 1829{
5b011235
CZ
1830 if (amdgpu_sched_jobs < 4) {
1831 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1832 amdgpu_sched_jobs);
1833 amdgpu_sched_jobs = 4;
47fc644f 1834 } else if (!is_power_of_2(amdgpu_sched_jobs)) {
5b011235
CZ
1835 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1836 amdgpu_sched_jobs);
1837 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1838 }
d38ceaf9 1839
83e74db6 1840 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
f9321cc4
CK
1841 /* gart size must be greater or equal to 32M */
1842 dev_warn(adev->dev, "gart size (%d) too small\n",
1843 amdgpu_gart_size);
83e74db6 1844 amdgpu_gart_size = -1;
d38ceaf9
AD
1845 }
1846
36d38372 1847 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
c4e1a13a 1848 /* gtt size must be greater or equal to 32M */
36d38372
CK
1849 dev_warn(adev->dev, "gtt size (%d) too small\n",
1850 amdgpu_gtt_size);
1851 amdgpu_gtt_size = -1;
d38ceaf9
AD
1852 }
1853
d07f14be
RH
1854 /* valid range is between 4 and 9 inclusive */
1855 if (amdgpu_vm_fragment_size != -1 &&
1856 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1857 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1858 amdgpu_vm_fragment_size = -1;
1859 }
1860
5d5bd5e3
KW
1861 if (amdgpu_sched_hw_submission < 2) {
1862 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1863 amdgpu_sched_hw_submission);
1864 amdgpu_sched_hw_submission = 2;
1865 } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1866 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1867 amdgpu_sched_hw_submission);
1868 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1869 }
1870
2656fd23
AG
1871 if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1872 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1873 amdgpu_reset_method = -1;
1874 }
1875
7951e376
RZ
1876 amdgpu_device_check_smu_prv_buffer_size(adev);
1877
06ec9070 1878 amdgpu_device_check_vm_size(adev);
d38ceaf9 1879
06ec9070 1880 amdgpu_device_check_block_size(adev);
6a7f76e7 1881
19aede77 1882 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
912dfc84 1883
e3c00faa 1884 return 0;
d38ceaf9
AD
1885}
1886
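/*
 * Worked example for the clamping above: booting with amdgpu.sched_jobs=2
 * warns and raises the value to the minimum of 4, while amdgpu.sched_jobs=5
 * warns and rounds up to the next power of two, 8.
 */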
1887/**
1888 * amdgpu_switcheroo_set_state - set switcheroo state
1889 *
1890 * @pdev: pci dev pointer
1694467b 1891 * @state: vga_switcheroo state
d38ceaf9 1892 *
12024b17 1893 * Callback for the switcheroo driver. Suspends or resumes
d38ceaf9
AD
 1894	 * the asic before it is powered down or after it is powered up using ACPI methods.
1895 */
8aba21b7
LT
1896static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1897 enum vga_switcheroo_state state)
d38ceaf9
AD
1898{
1899 struct drm_device *dev = pci_get_drvdata(pdev);
de185019 1900 int r;
d38ceaf9 1901
b98c6299 1902 if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
d38ceaf9
AD
1903 return;
1904
1905 if (state == VGA_SWITCHEROO_ON) {
dd4fa6c1 1906 pr_info("switched on\n");
d38ceaf9
AD
1907 /* don't suspend or resume card normally */
1908 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1909
8f66090b
TZ
1910 pci_set_power_state(pdev, PCI_D0);
1911 amdgpu_device_load_pci_state(pdev);
1912 r = pci_enable_device(pdev);
de185019
AD
1913 if (r)
1914 DRM_WARN("pci_enable_device failed (%d)\n", r);
1915 amdgpu_device_resume(dev, true);
d38ceaf9 1916
d38ceaf9 1917 dev->switch_power_state = DRM_SWITCH_POWER_ON;
d38ceaf9 1918 } else {
dd4fa6c1 1919 pr_info("switched off\n");
d38ceaf9 1920 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
5095d541 1921 amdgpu_device_prepare(dev);
de185019 1922 amdgpu_device_suspend(dev, true);
8f66090b 1923 amdgpu_device_cache_pci_state(pdev);
de185019 1924 /* Shut down the device */
8f66090b
TZ
1925 pci_disable_device(pdev);
1926 pci_set_power_state(pdev, PCI_D3cold);
d38ceaf9
AD
1927 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1928 }
1929}
1930
1931/**
1932 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1933 *
1934 * @pdev: pci dev pointer
1935 *
 1936	 * Callback for the switcheroo driver. Checks whether the switcheroo
1937 * state can be changed.
1938 * Returns true if the state can be changed, false if not.
1939 */
1940static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1941{
1942 struct drm_device *dev = pci_get_drvdata(pdev);
1943
b8920e1e 1944 /*
d38ceaf9
AD
1945 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1946 * locking inversion with the driver load path. And the access here is
1947 * completely racy anyway. So don't bother with locking for now.
1948 */
7e13ad89 1949 return atomic_read(&dev->open_count) == 0;
d38ceaf9
AD
1950}
1951
1952static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1953 .set_gpu_state = amdgpu_switcheroo_set_state,
1954 .reprobe = NULL,
1955 .can_switch = amdgpu_switcheroo_can_switch,
1956};
1957
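/*
 * Illustrative sketch (assumption: the real registration happens elsewhere
 * in this file): wiring the ops table above into vga_switcheroo, with PX
 * support deciding whether runtime power control is used.
 * my_register_switcheroo() is a hypothetical wrapper name.
 */
static void my_register_switcheroo(struct amdgpu_device *adev)
{
	bool px = amdgpu_device_supports_px(adev_to_drm(adev));

	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, px);
}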
e3ecdffa
AD
1958/**
1959 * amdgpu_device_ip_set_clockgating_state - set the CG state
1960 *
87e3f136 1961 * @dev: amdgpu_device pointer
e3ecdffa
AD
1962 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1963 * @state: clockgating state (gate or ungate)
1964 *
1965 * Sets the requested clockgating state for all instances of
1966 * the hardware IP specified.
1967 * Returns the error code from the last instance.
1968 */
43fa561f 1969int amdgpu_device_ip_set_clockgating_state(void *dev,
2990a1fc
AD
1970 enum amd_ip_block_type block_type,
1971 enum amd_clockgating_state state)
d38ceaf9 1972{
43fa561f 1973 struct amdgpu_device *adev = dev;
d38ceaf9
AD
1974 int i, r = 0;
1975
1976 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1977 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1978 continue;
c722865a
RZ
1979 if (adev->ip_blocks[i].version->type != block_type)
1980 continue;
1981 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1982 continue;
1983 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1984 (void *)adev, state);
1985 if (r)
1986 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1987 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1988 }
1989 return r;
1990}
1991
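/*
 * Illustrative usage sketch (hypothetical helper, not in the original file):
 * gate clockgating for every GFX IP instance via the setter above.
 */
static int my_gate_gfx_cg(struct amdgpu_device *adev)
{
	return amdgpu_device_ip_set_clockgating_state(adev,
						      AMD_IP_BLOCK_TYPE_GFX,
						      AMD_CG_STATE_GATE);
}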
e3ecdffa
AD
1992/**
1993 * amdgpu_device_ip_set_powergating_state - set the PG state
1994 *
87e3f136 1995 * @dev: amdgpu_device pointer
e3ecdffa
AD
1996 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1997 * @state: powergating state (gate or ungate)
1998 *
1999 * Sets the requested powergating state for all instances of
2000 * the hardware IP specified.
2001 * Returns the error code from the last instance.
2002 */
43fa561f 2003int amdgpu_device_ip_set_powergating_state(void *dev,
2990a1fc
AD
2004 enum amd_ip_block_type block_type,
2005 enum amd_powergating_state state)
d38ceaf9 2006{
43fa561f 2007 struct amdgpu_device *adev = dev;
d38ceaf9
AD
2008 int i, r = 0;
2009
2010 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2011 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 2012 continue;
c722865a
RZ
2013 if (adev->ip_blocks[i].version->type != block_type)
2014 continue;
2015 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
2016 continue;
2017 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
2018 (void *)adev, state);
2019 if (r)
2020 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
2021 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
2022 }
2023 return r;
2024}
2025
e3ecdffa
AD
2026/**
2027 * amdgpu_device_ip_get_clockgating_state - get the CG state
2028 *
2029 * @adev: amdgpu_device pointer
2030 * @flags: clockgating feature flags
2031 *
2032 * Walks the list of IPs on the device and updates the clockgating
2033 * flags for each IP.
2034 * Updates @flags with the feature flags for each hardware IP where
2035 * clockgating is enabled.
2036 */
2990a1fc 2037void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
25faeddc 2038 u64 *flags)
6cb2d4e4
HR
2039{
2040 int i;
2041
2042 for (i = 0; i < adev->num_ip_blocks; i++) {
2043 if (!adev->ip_blocks[i].status.valid)
2044 continue;
2045 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
2046 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
2047 }
2048}
2049
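/*
 * Illustrative usage sketch (hypothetical helper): collect the CG feature
 * flags and test one of the standard amd_shared.h bits.
 */
static bool my_gfx_mgcg_enabled(struct amdgpu_device *adev)
{
	u64 flags = 0;

	amdgpu_device_ip_get_clockgating_state(adev, &flags);
	return flags & AMD_CG_SUPPORT_GFX_MGCG;
}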
e3ecdffa
AD
2050/**
2051 * amdgpu_device_ip_wait_for_idle - wait for idle
2052 *
2053 * @adev: amdgpu_device pointer
2054 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2055 *
 2056	 * Waits for the requested hardware IP to be idle.
2057 * Returns 0 for success or a negative error code on failure.
2058 */
2990a1fc
AD
2059int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
2060 enum amd_ip_block_type block_type)
5dbbb60b
AD
2061{
2062 int i, r;
2063
2064 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2065 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 2066 continue;
a1255107
AD
2067 if (adev->ip_blocks[i].version->type == block_type) {
2068 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
5dbbb60b
AD
2069 if (r)
2070 return r;
2071 break;
2072 }
2073 }
2074 return 0;
2075
2076}
2077
e3ecdffa
AD
2078/**
2079 * amdgpu_device_ip_is_idle - is the hardware IP idle
2080 *
2081 * @adev: amdgpu_device pointer
2082 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2083 *
2084 * Check if the hardware IP is idle or not.
 2085	 * Returns true if the IP is idle, false if not.
2086 */
2990a1fc
AD
2087bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
2088 enum amd_ip_block_type block_type)
5dbbb60b
AD
2089{
2090 int i;
2091
2092 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2093 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 2094 continue;
a1255107
AD
2095 if (adev->ip_blocks[i].version->type == block_type)
2096 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
5dbbb60b
AD
2097 }
2098 return true;
2099
2100}
2101
e3ecdffa
AD
2102/**
2103 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
2104 *
2105 * @adev: amdgpu_device pointer
87e3f136 2106 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
e3ecdffa
AD
2107 *
2108 * Returns a pointer to the hardware IP block structure
2109 * if it exists for the asic, otherwise NULL.
2110 */
2990a1fc
AD
2111struct amdgpu_ip_block *
2112amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
2113 enum amd_ip_block_type type)
d38ceaf9
AD
2114{
2115 int i;
2116
2117 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 2118 if (adev->ip_blocks[i].version->type == type)
d38ceaf9
AD
2119 return &adev->ip_blocks[i];
2120
2121 return NULL;
2122}
2123
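/*
 * Illustrative usage sketch (hypothetical helper): look up the GFX IP block
 * and log its version, including the NULL check the lookup above requires.
 */
static void my_log_gfx_ip_version(struct amdgpu_device *adev)
{
	struct amdgpu_ip_block *ip =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);

	if (ip)
		dev_info(adev->dev, "GFX IP v%u.%u\n",
			 ip->version->major, ip->version->minor);
}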
2124/**
2990a1fc 2125 * amdgpu_device_ip_block_version_cmp
d38ceaf9
AD
2126 *
2127 * @adev: amdgpu_device pointer
5fc3aeeb 2128 * @type: enum amd_ip_block_type
d38ceaf9
AD
2129 * @major: major version
2130 * @minor: minor version
2131 *
 2132	 * Returns 0 if the installed version is equal or greater,
 2133	 * 1 if it is smaller or the ip_block doesn't exist.
2134 */
2990a1fc
AD
2135int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
2136 enum amd_ip_block_type type,
2137 u32 major, u32 minor)
d38ceaf9 2138{
2990a1fc 2139 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
d38ceaf9 2140
a1255107
AD
2141 if (ip_block && ((ip_block->version->major > major) ||
2142 ((ip_block->version->major == major) &&
2143 (ip_block->version->minor >= minor))))
d38ceaf9
AD
2144 return 0;
2145
2146 return 1;
2147}
2148
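/*
 * Illustrative usage sketch (hypothetical helper) showing the inverted
 * return convention documented above: 0 means "at least this version".
 */
static bool my_has_smc_v7(struct amdgpu_device *adev)
{
	return amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
						  7, 0) == 0;
}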
a1255107 2149/**
2990a1fc 2150 * amdgpu_device_ip_block_add
a1255107
AD
2151 *
2152 * @adev: amdgpu_device pointer
2153 * @ip_block_version: pointer to the IP to add
2154 *
2155 * Adds the IP block driver information to the collection of IPs
2156 * on the asic.
2157 */
2990a1fc
AD
2158int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
2159 const struct amdgpu_ip_block_version *ip_block_version)
a1255107
AD
2160{
2161 if (!ip_block_version)
2162 return -EINVAL;
2163
7bd939d0
LG
2164 switch (ip_block_version->type) {
2165 case AMD_IP_BLOCK_TYPE_VCN:
2166 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
2167 return 0;
2168 break;
2169 case AMD_IP_BLOCK_TYPE_JPEG:
2170 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
2171 return 0;
2172 break;
2173 default:
2174 break;
2175 }
2176
e966a725 2177 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
a0bae357
HR
2178 ip_block_version->funcs->name);
2179
a1255107
AD
2180 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
2181
2182 return 0;
2183}
2184
e3ecdffa
AD
2185/**
2186 * amdgpu_device_enable_virtual_display - enable virtual display feature
2187 *
2188 * @adev: amdgpu_device pointer
2189 *
 2190	 * Enables the virtual display feature if the user has enabled it via
2191 * the module parameter virtual_display. This feature provides a virtual
2192 * display hardware on headless boards or in virtualized environments.
2193 * This function parses and validates the configuration string specified by
 2194	 * the user and configures the virtual display configuration (number of
2195 * virtual connectors, crtcs, etc.) specified.
2196 */
483ef985 2197static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
9accf2fd
ED
2198{
2199 adev->enable_virtual_display = false;
2200
2201 if (amdgpu_virtual_display) {
8f66090b 2202 const char *pci_address_name = pci_name(adev->pdev);
0f66356d 2203 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
9accf2fd
ED
2204
2205 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
2206 pciaddstr_tmp = pciaddstr;
0f66356d
ED
2207 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
2208 pciaddname = strsep(&pciaddname_tmp, ",");
967de2a9
YT
2209 if (!strcmp("all", pciaddname)
2210 || !strcmp(pci_address_name, pciaddname)) {
0f66356d
ED
2211 long num_crtc;
2212 int res = -1;
2213
9accf2fd 2214 adev->enable_virtual_display = true;
0f66356d
ED
2215
2216 if (pciaddname_tmp)
2217 res = kstrtol(pciaddname_tmp, 10,
2218 &num_crtc);
2219
2220 if (!res) {
2221 if (num_crtc < 1)
2222 num_crtc = 1;
2223 if (num_crtc > 6)
2224 num_crtc = 6;
2225 adev->mode_info.num_crtc = num_crtc;
2226 } else {
2227 adev->mode_info.num_crtc = 1;
2228 }
9accf2fd
ED
2229 break;
2230 }
2231 }
2232
0f66356d
ED
2233 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
2234 amdgpu_virtual_display, pci_address_name,
2235 adev->enable_virtual_display, adev->mode_info.num_crtc);
9accf2fd
ED
2236
2237 kfree(pciaddstr);
2238 }
2239}
2240
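/*
 * Format note derived from the parser above: virtual_display is a
 * semicolon-separated list of "<pci address>[,<num crtcs>]" entries (or
 * "all"), with the crtc count clamped to 1..6, e.g.:
 *
 *	modprobe amdgpu virtual_display=0000:01:00.0,2
 */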
25263da3
AD
2241void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
2242{
2243 if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
2244 adev->mode_info.num_crtc = 1;
2245 adev->enable_virtual_display = true;
2246 DRM_INFO("virtual_display:%d, num_crtc:%d\n",
2247 adev->enable_virtual_display, adev->mode_info.num_crtc);
2248 }
2249}
2250
e3ecdffa
AD
2251/**
2252 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2253 *
2254 * @adev: amdgpu_device pointer
2255 *
2256 * Parses the asic configuration parameters specified in the gpu info
 2257	 * firmware and makes them available to the driver for use in configuring
2258 * the asic.
2259 * Returns 0 on success, -EINVAL on failure.
2260 */
e2a75f88
AD
2261static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
2262{
e2a75f88 2263 const char *chip_name;
c0a43457 2264 char fw_name[40];
e2a75f88
AD
2265 int err;
2266 const struct gpu_info_firmware_header_v1_0 *hdr;
2267
ab4fe3e1
HR
2268 adev->firmware.gpu_info_fw = NULL;
2269
fb915c87
AD
2270 if (adev->mman.discovery_bin)
2271 return 0;
258620d0 2272
e2a75f88 2273 switch (adev->asic_type) {
e2a75f88
AD
2274 default:
2275 return 0;
2276 case CHIP_VEGA10:
2277 chip_name = "vega10";
2278 break;
3f76dced
AD
2279 case CHIP_VEGA12:
2280 chip_name = "vega12";
2281 break;
2d2e5e7e 2282 case CHIP_RAVEN:
54f78a76 2283 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
54c4d17e 2284 chip_name = "raven2";
54f78a76 2285 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
741deade 2286 chip_name = "picasso";
54c4d17e
FX
2287 else
2288 chip_name = "raven";
2d2e5e7e 2289 break;
65e60f6e
LM
2290 case CHIP_ARCTURUS:
2291 chip_name = "arcturus";
2292 break;
42b325e5
XY
2293 case CHIP_NAVI12:
2294 chip_name = "navi12";
2295 break;
e2a75f88
AD
2296 }
2297
2298 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
b31d3063 2299 err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
e2a75f88
AD
2300 if (err) {
2301 dev_err(adev->dev,
b31d3063 2302 "Failed to get gpu_info firmware \"%s\"\n",
e2a75f88
AD
2303 fw_name);
2304 goto out;
2305 }
2306
ab4fe3e1 2307 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
e2a75f88
AD
2308 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2309
2310 switch (hdr->version_major) {
2311 case 1:
2312 {
2313 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
ab4fe3e1 2314 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
e2a75f88
AD
2315 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2316
cc375d8c
TY
2317 /*
 2318	 * Should be dropped when DAL no longer needs it.
2319 */
2320 if (adev->asic_type == CHIP_NAVI12)
ec51d3fa
XY
2321 goto parse_soc_bounding_box;
2322
b5ab16bf
AD
2323 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2324 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2325 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2326 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
e2a75f88 2327 adev->gfx.config.max_texture_channel_caches =
b5ab16bf
AD
2328 le32_to_cpu(gpu_info_fw->gc_num_tccs);
2329 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2330 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2331 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2332 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
e2a75f88 2333 adev->gfx.config.double_offchip_lds_buf =
b5ab16bf
AD
2334 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2335 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
51fd0370
HZ
2336 adev->gfx.cu_info.max_waves_per_simd =
2337 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2338 adev->gfx.cu_info.max_scratch_slots_per_cu =
2339 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2340 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
48321c3d 2341 if (hdr->version_minor >= 1) {
35c2e910
HZ
2342 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2343 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2344 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2345 adev->gfx.config.num_sc_per_sh =
2346 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2347 adev->gfx.config.num_packer_per_sc =
2348 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2349 }
ec51d3fa
XY
2350
2351parse_soc_bounding_box:
ec51d3fa
XY
2352 /*
 2353	 * soc bounding box info is not integrated into the discovery table, so
258620d0 2354 * we always need to parse it from gpu info firmware if needed.
ec51d3fa 2355 */
48321c3d
HW
2356 if (hdr->version_minor == 2) {
2357 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2358 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2359 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2360 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2361 }
e2a75f88
AD
2362 break;
2363 }
2364 default:
2365 dev_err(adev->dev,
2366 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2367 err = -EINVAL;
2368 goto out;
2369 }
2370out:
e2a75f88
AD
2371 return err;
2372}
2373
e3ecdffa
AD
2374/**
2375 * amdgpu_device_ip_early_init - run early init for hardware IPs
2376 *
2377 * @adev: amdgpu_device pointer
2378 *
2379 * Early initialization pass for hardware IPs. The hardware IPs that make
 2380	 * up each asic are discovered, then each IP's early_init callback is run. This
2381 * is the first stage in initializing the asic.
2382 * Returns 0 on success, negative error code on failure.
2383 */
06ec9070 2384static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
d38ceaf9 2385{
901e2be2 2386 struct pci_dev *parent;
aaa36a97 2387 int i, r;
ced69502 2388 bool total;
d38ceaf9 2389
483ef985 2390 amdgpu_device_enable_virtual_display(adev);
a6be7570 2391
00a979f3 2392 if (amdgpu_sriov_vf(adev)) {
00a979f3 2393 r = amdgpu_virt_request_full_gpu(adev, true);
aaa36a97
AD
2394 if (r)
2395 return r;
00a979f3
WS
2396 }
2397
d38ceaf9 2398 switch (adev->asic_type) {
33f34802
KW
2399#ifdef CONFIG_DRM_AMDGPU_SI
2400 case CHIP_VERDE:
2401 case CHIP_TAHITI:
2402 case CHIP_PITCAIRN:
2403 case CHIP_OLAND:
2404 case CHIP_HAINAN:
295d0daf 2405 adev->family = AMDGPU_FAMILY_SI;
33f34802
KW
2406 r = si_set_ip_blocks(adev);
2407 if (r)
2408 return r;
2409 break;
2410#endif
a2e73f56
AD
2411#ifdef CONFIG_DRM_AMDGPU_CIK
2412 case CHIP_BONAIRE:
2413 case CHIP_HAWAII:
2414 case CHIP_KAVERI:
2415 case CHIP_KABINI:
2416 case CHIP_MULLINS:
e1ad2d53 2417 if (adev->flags & AMD_IS_APU)
a2e73f56 2418 adev->family = AMDGPU_FAMILY_KV;
e1ad2d53
AD
2419 else
2420 adev->family = AMDGPU_FAMILY_CI;
a2e73f56
AD
2421
2422 r = cik_set_ip_blocks(adev);
2423 if (r)
2424 return r;
2425 break;
2426#endif
da87c30b
AD
2427 case CHIP_TOPAZ:
2428 case CHIP_TONGA:
2429 case CHIP_FIJI:
2430 case CHIP_POLARIS10:
2431 case CHIP_POLARIS11:
2432 case CHIP_POLARIS12:
2433 case CHIP_VEGAM:
2434 case CHIP_CARRIZO:
2435 case CHIP_STONEY:
2436 if (adev->flags & AMD_IS_APU)
2437 adev->family = AMDGPU_FAMILY_CZ;
2438 else
2439 adev->family = AMDGPU_FAMILY_VI;
2440
2441 r = vi_set_ip_blocks(adev);
2442 if (r)
2443 return r;
2444 break;
d38ceaf9 2445 default:
63352b7f
AD
2446 r = amdgpu_discovery_set_ip_blocks(adev);
2447 if (r)
2448 return r;
2449 break;
d38ceaf9
AD
2450 }
2451
901e2be2
AD
2452 if (amdgpu_has_atpx() &&
2453 (amdgpu_is_atpx_hybrid() ||
2454 amdgpu_has_atpx_dgpu_power_cntl()) &&
2455 ((adev->flags & AMD_IS_APU) == 0) &&
7b1c6263 2456 !dev_is_removable(&adev->pdev->dev))
901e2be2
AD
2457 adev->flags |= AMD_IS_PX;
2458
85ac2021 2459 if (!(adev->flags & AMD_IS_APU)) {
c4c8955b 2460 parent = pcie_find_root_port(adev->pdev);
85ac2021
AD
2461 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2462 }
901e2be2 2463
1884734a 2464
3b94fb10 2465 adev->pm.pp_feature = amdgpu_pp_feature_mask;
a35ad98b 2466 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
00544006 2467 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
4215a119
HC
2468 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2469 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
d9b3a066 2470 if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
fbf1035b 2471 adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
00f54b97 2472
ced69502 2473 total = true;
d38ceaf9
AD
2474 for (i = 0; i < adev->num_ip_blocks; i++) {
2475 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
0c451baf 2476 DRM_WARN("disabled ip block: %d <%s>\n",
ed8cf00c 2477 i, adev->ip_blocks[i].version->funcs->name);
a1255107 2478 adev->ip_blocks[i].status.valid = false;
d38ceaf9 2479 } else {
a1255107
AD
2480 if (adev->ip_blocks[i].version->funcs->early_init) {
2481 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 2482 if (r == -ENOENT) {
a1255107 2483 adev->ip_blocks[i].status.valid = false;
2c1a2784 2484 } else if (r) {
a1255107
AD
2485 DRM_ERROR("early_init of IP block <%s> failed %d\n",
2486 adev->ip_blocks[i].version->funcs->name, r);
ced69502 2487 total = false;
2c1a2784 2488 } else {
a1255107 2489 adev->ip_blocks[i].status.valid = true;
2c1a2784 2490 }
974e6b64 2491 } else {
a1255107 2492 adev->ip_blocks[i].status.valid = true;
d38ceaf9 2493 }
d38ceaf9 2494 }
21a249ca
AD
2495 /* get the vbios after the asic_funcs are set up */
2496 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
6e29c227
AD
2497 r = amdgpu_device_parse_gpu_info_fw(adev);
2498 if (r)
2499 return r;
2500
21a249ca 2501 /* Read BIOS */
9535a86a
SZ
2502 if (amdgpu_device_read_bios(adev)) {
2503 if (!amdgpu_get_bios(adev))
2504 return -EINVAL;
21a249ca 2505
9535a86a
SZ
2506 r = amdgpu_atombios_init(adev);
2507 if (r) {
2508 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2509 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2510 return r;
2511 }
21a249ca 2512 }
77eabc6f
PJZ
2513
2514 /*get pf2vf msg info at it's earliest time*/
2515 if (amdgpu_sriov_vf(adev))
2516 amdgpu_virt_init_data_exchange(adev);
2517
21a249ca 2518 }
d38ceaf9 2519 }
ced69502
ML
2520 if (!total)
2521 return -ENODEV;
d38ceaf9 2522
00fa4035 2523 amdgpu_amdkfd_device_probe(adev);
395d1fb9
NH
2524 adev->cg_flags &= amdgpu_cg_mask;
2525 adev->pg_flags &= amdgpu_pg_mask;
2526
d38ceaf9
AD
2527 return 0;
2528}
2529
0a4f2520
RZ
2530static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2531{
2532 int i, r;
2533
2534 for (i = 0; i < adev->num_ip_blocks; i++) {
2535 if (!adev->ip_blocks[i].status.sw)
2536 continue;
2537 if (adev->ip_blocks[i].status.hw)
2538 continue;
2539 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2d11fd3f 2540 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
0a4f2520
RZ
2541 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2542 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2543 if (r) {
2544 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2545 adev->ip_blocks[i].version->funcs->name, r);
2546 return r;
2547 }
2548 adev->ip_blocks[i].status.hw = true;
2549 }
2550 }
2551
2552 return 0;
2553}
2554
2555static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2556{
2557 int i, r;
2558
2559 for (i = 0; i < adev->num_ip_blocks; i++) {
2560 if (!adev->ip_blocks[i].status.sw)
2561 continue;
2562 if (adev->ip_blocks[i].status.hw)
2563 continue;
2564 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2565 if (r) {
2566 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2567 adev->ip_blocks[i].version->funcs->name, r);
2568 return r;
2569 }
2570 adev->ip_blocks[i].status.hw = true;
2571 }
2572
2573 return 0;
2574}
2575
7a3e0bb2
RZ
2576static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2577{
2578 int r = 0;
2579 int i;
80f41f84 2580 uint32_t smu_version;
7a3e0bb2
RZ
2581
2582 if (adev->asic_type >= CHIP_VEGA10) {
2583 for (i = 0; i < adev->num_ip_blocks; i++) {
482f0e53
ML
2584 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2585 continue;
2586
e3c1b071 2587 if (!adev->ip_blocks[i].status.sw)
2588 continue;
2589
482f0e53
ML
2590 /* no need to do the fw loading again if already done*/
2591 if (adev->ip_blocks[i].status.hw == true)
2592 break;
2593
53b3f8f4 2594 if (amdgpu_in_reset(adev) || adev->in_suspend) {
482f0e53
ML
2595 r = adev->ip_blocks[i].version->funcs->resume(adev);
2596 if (r) {
2597 DRM_ERROR("resume of IP block <%s> failed %d\n",
7a3e0bb2 2598 adev->ip_blocks[i].version->funcs->name, r);
482f0e53
ML
2599 return r;
2600 }
2601 } else {
2602 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2603 if (r) {
2604 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2605 adev->ip_blocks[i].version->funcs->name, r);
2606 return r;
7a3e0bb2 2607 }
7a3e0bb2 2608 }
482f0e53
ML
2609
2610 adev->ip_blocks[i].status.hw = true;
2611 break;
7a3e0bb2
RZ
2612 }
2613 }
482f0e53 2614
8973d9ec
ED
2615 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2616 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
7a3e0bb2 2617
80f41f84 2618 return r;
7a3e0bb2
RZ
2619}
2620
5fd8518d
AG
2621static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2622{
2623 long timeout;
2624 int r, i;
2625
2626 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2627 struct amdgpu_ring *ring = adev->rings[i];
2628
2629 /* No need to setup the GPU scheduler for rings that don't need it */
2630 if (!ring || ring->no_scheduler)
2631 continue;
2632
2633 switch (ring->funcs->type) {
2634 case AMDGPU_RING_TYPE_GFX:
2635 timeout = adev->gfx_timeout;
2636 break;
2637 case AMDGPU_RING_TYPE_COMPUTE:
2638 timeout = adev->compute_timeout;
2639 break;
2640 case AMDGPU_RING_TYPE_SDMA:
2641 timeout = adev->sdma_timeout;
2642 break;
2643 default:
2644 timeout = adev->video_timeout;
2645 break;
2646 }
2647
a6149f03 2648 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
56e44960 2649 DRM_SCHED_PRIORITY_COUNT,
11f25c84 2650 ring->num_hw_submission, 0,
8ab62eda
JG
2651 timeout, adev->reset_domain->wq,
2652 ring->sched_score, ring->name,
2653 adev->dev);
5fd8518d
AG
2654 if (r) {
2655 DRM_ERROR("Failed to create scheduler on ring %s.\n",
2656 ring->name);
2657 return r;
2658 }
037b98a2
AD
2659 r = amdgpu_uvd_entity_init(adev, ring);
2660 if (r) {
2661 DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
2662 ring->name);
2663 return r;
2664 }
2665 r = amdgpu_vce_entity_init(adev, ring);
2666 if (r) {
2667 DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
2668 ring->name);
2669 return r;
2670 }
5fd8518d
AG
2671 }
2672
d425c6f4
JZ
2673 amdgpu_xcp_update_partition_sched_list(adev);
2674
5fd8518d
AG
2675 return 0;
2676}
2677
2678
e3ecdffa
AD
2679/**
2680 * amdgpu_device_ip_init - run init for hardware IPs
2681 *
2682 * @adev: amdgpu_device pointer
2683 *
2684 * Main initialization pass for hardware IPs. The list of all the hardware
2685 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2686 * are run. sw_init initializes the software state associated with each IP
2687 * and hw_init initializes the hardware associated with each IP.
2688 * Returns 0 on success, negative error code on failure.
2689 */
06ec9070 2690static int amdgpu_device_ip_init(struct amdgpu_device *adev)
d38ceaf9
AD
2691{
2692 int i, r;
2693
c030f2e4 2694 r = amdgpu_ras_init(adev);
2695 if (r)
2696 return r;
2697
d38ceaf9 2698 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2699 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 2700 continue;
a1255107 2701 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 2702 if (r) {
a1255107
AD
2703 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2704 adev->ip_blocks[i].version->funcs->name, r);
72d3f592 2705 goto init_failed;
2c1a2784 2706 }
a1255107 2707 adev->ip_blocks[i].status.sw = true;
bfca0289 2708
c1c39032
AD
2709 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2710 /* need to do common hw init early so everything is set up for gmc */
2711 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2712 if (r) {
2713 DRM_ERROR("hw_init %d failed %d\n", i, r);
2714 goto init_failed;
2715 }
2716 adev->ip_blocks[i].status.hw = true;
2717 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2718 /* need to do gmc hw init early so we can allocate gpu mem */
892deb48
VS
2719 /* Try to reserve bad pages early */
2720 if (amdgpu_sriov_vf(adev))
2721 amdgpu_virt_exchange_data(adev);
2722
7ccfd79f 2723 r = amdgpu_device_mem_scratch_init(adev);
2c1a2784 2724 if (r) {
7ccfd79f 2725 DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
72d3f592 2726 goto init_failed;
2c1a2784 2727 }
a1255107 2728 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
2729 if (r) {
2730 DRM_ERROR("hw_init %d failed %d\n", i, r);
72d3f592 2731 goto init_failed;
2c1a2784 2732 }
06ec9070 2733 r = amdgpu_device_wb_init(adev);
2c1a2784 2734 if (r) {
06ec9070 2735 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
72d3f592 2736 goto init_failed;
2c1a2784 2737 }
a1255107 2738 adev->ip_blocks[i].status.hw = true;
2493664f
ML
2739
2740 /* right after GMC hw init, we create CSA */
02ff519e 2741 if (adev->gfx.mcbp) {
1e256e27 2742 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
58ab2c08
CK
2743 AMDGPU_GEM_DOMAIN_VRAM |
2744 AMDGPU_GEM_DOMAIN_GTT,
2745 AMDGPU_CSA_SIZE);
2493664f
ML
2746 if (r) {
2747 DRM_ERROR("allocate CSA failed %d\n", r);
72d3f592 2748 goto init_failed;
2493664f
ML
2749 }
2750 }
c8031019
APS
2751
2752 r = amdgpu_seq64_init(adev);
2753 if (r) {
2754 DRM_ERROR("allocate seq64 failed %d\n", r);
2755 goto init_failed;
2756 }
d38ceaf9
AD
2757 }
2758 }
2759
c9ffa427 2760 if (amdgpu_sriov_vf(adev))
22c16d25 2761 amdgpu_virt_init_data_exchange(adev);
c9ffa427 2762
533aed27
AG
2763 r = amdgpu_ib_pool_init(adev);
2764 if (r) {
2765 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2766 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2767 goto init_failed;
2768 }
2769
c8963ea4
RZ
2770 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2771 if (r)
72d3f592 2772 goto init_failed;
0a4f2520
RZ
2773
2774 r = amdgpu_device_ip_hw_init_phase1(adev);
2775 if (r)
72d3f592 2776 goto init_failed;
0a4f2520 2777
7a3e0bb2
RZ
2778 r = amdgpu_device_fw_loading(adev);
2779 if (r)
72d3f592 2780 goto init_failed;
7a3e0bb2 2781
0a4f2520
RZ
2782 r = amdgpu_device_ip_hw_init_phase2(adev);
2783 if (r)
72d3f592 2784 goto init_failed;
d38ceaf9 2785
121a2bc6
AG
2786 /*
2787 * retired pages will be loaded from eeprom and reserved here,
2788 * it should be called after amdgpu_device_ip_hw_init_phase2 since
2789 * for some ASICs the RAS EEPROM code relies on SMU fully functioning
 2790	 * for I2C communication, which is only true at this point.
b82e65a9
GC
2791 *
2792 * amdgpu_ras_recovery_init may fail, but the upper only cares the
2793 * failure from bad gpu situation and stop amdgpu init process
2794 * accordingly. For other failed cases, it will still release all
2795 * the resource and print error message, rather than returning one
2796 * negative value to upper level.
121a2bc6
AG
2797 *
2798 * Note: theoretically, this should be called before all vram allocations
2799 * to protect retired page from abusing
2800 */
b82e65a9
GC
2801 r = amdgpu_ras_recovery_init(adev);
2802 if (r)
2803 goto init_failed;
121a2bc6 2804
cfbb6b00
AG
2805 /**
2806 * In case of XGMI grab extra reference for reset domain for this device
2807 */
a4c63caf 2808 if (adev->gmc.xgmi.num_physical_nodes > 1) {
cfbb6b00 2809 if (amdgpu_xgmi_add_device(adev) == 0) {
46c67660 2810 if (!amdgpu_sriov_vf(adev)) {
2efc30f0
VC
2811 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2812
dfd0287b
LH
2813 if (WARN_ON(!hive)) {
2814 r = -ENOENT;
2815 goto init_failed;
2816 }
2817
46c67660 2818 if (!hive->reset_domain ||
2819 !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2820 r = -ENOENT;
2821 amdgpu_put_xgmi_hive(hive);
2822 goto init_failed;
2823 }
2824
2825 /* Drop the early temporary reset domain we created for device */
2826 amdgpu_reset_put_reset_domain(adev->reset_domain);
2827 adev->reset_domain = hive->reset_domain;
9dfa4860 2828 amdgpu_put_xgmi_hive(hive);
cfbb6b00 2829 }
a4c63caf
AG
2830 }
2831 }
2832
5fd8518d
AG
2833 r = amdgpu_device_init_schedulers(adev);
2834 if (r)
2835 goto init_failed;
e3c1b071 2836
b7043800
AD
2837 if (adev->mman.buffer_funcs_ring->sched.ready)
2838 amdgpu_ttm_set_buffer_funcs_status(adev, true);
2839
e3c1b071 2840 /* Don't init kfd if whole hive need to be reset during init */
84b4dd3f
PY
2841 if (!adev->gmc.xgmi.pending_reset) {
2842 kgd2kfd_init_zone_device(adev);
e3c1b071 2843 amdgpu_amdkfd_device_init(adev);
84b4dd3f 2844 }
c6332b97 2845
bd607166
KR
2846 amdgpu_fru_get_product_info(adev);
2847
72d3f592 2848init_failed:
c6332b97 2849
72d3f592 2850 return r;
d38ceaf9
AD
2851}
2852
e3ecdffa
AD
2853/**
2854 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2855 *
2856 * @adev: amdgpu_device pointer
2857 *
2858 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2859 * this function before a GPU reset. If the value is retained after a
 2860	 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2861 */
06ec9070 2862static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
0c49e0b8
CZ
2863{
2864 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2865}
2866
e3ecdffa
AD
2867/**
2868 * amdgpu_device_check_vram_lost - check if vram is valid
2869 *
2870 * @adev: amdgpu_device pointer
2871 *
2872 * Checks the reset magic value written to the gart pointer in VRAM.
2873 * The driver calls this after a GPU reset to see if the contents of
 2874	 * VRAM were lost or not.
2875 * returns true if vram is lost, false if not.
2876 */
06ec9070 2877static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
0c49e0b8 2878{
dadce777
EQ
2879 if (memcmp(adev->gart.ptr, adev->reset_magic,
2880 AMDGPU_RESET_MAGIC_NUM))
2881 return true;
2882
53b3f8f4 2883 if (!amdgpu_in_reset(adev))
dadce777
EQ
2884 return false;
2885
2886 /*
2887 * For all ASICs with baco/mode1 reset, the VRAM is
2888 * always assumed to be lost.
2889 */
2890 switch (amdgpu_asic_reset_method(adev)) {
2891 case AMD_RESET_METHOD_BACO:
2892 case AMD_RESET_METHOD_MODE1:
2893 return true;
2894 default:
2895 return false;
2896 }
0c49e0b8
CZ
2897}
2898
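/*
 * Illustrative ordering sketch for the two helpers above (the actual call
 * sites live elsewhere in this file):
 *
 *	amdgpu_device_fill_reset_magic(adev);
 *	... perform the GPU reset ...
 *	vram_lost = amdgpu_device_check_vram_lost(adev);
 */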
e3ecdffa 2899/**
1112a46b 2900 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
e3ecdffa
AD
2901 *
2902 * @adev: amdgpu_device pointer
b8b72130 2903 * @state: clockgating state (gate or ungate)
e3ecdffa 2904 *
e3ecdffa 2905 * The list of all the hardware IPs that make up the asic is walked and the
1112a46b
RZ
2906 * set_clockgating_state callbacks are run.
2907 * Late initialization pass enabling clockgating for hardware IPs.
2908 * Fini or suspend, pass disabling clockgating for hardware IPs.
e3ecdffa
AD
2909 * Returns 0 on success, negative error code on failure.
2910 */
fdd34271 2911
5d89bb2d
LL
2912int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2913 enum amd_clockgating_state state)
d38ceaf9 2914{
1112a46b 2915 int i, j, r;
d38ceaf9 2916
4a2ba394
SL
2917 if (amdgpu_emu_mode == 1)
2918 return 0;
2919
1112a46b
RZ
2920 for (j = 0; j < adev->num_ip_blocks; j++) {
2921 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
a2d31dc3 2922 if (!adev->ip_blocks[i].status.late_initialized)
d38ceaf9 2923 continue;
47198eb7 2924 /* skip CG for GFX, SDMA on S0ix */
5d70a549 2925 if (adev->in_s0ix &&
47198eb7
AD
2926 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2927 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
5d70a549 2928 continue;
4a446d55 2929 /* skip CG for VCE/UVD, it's handled specially */
a1255107 2930 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
57716327 2931 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
34319b32 2932 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
52f2e779 2933 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
57716327 2934 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
4a446d55 2935 /* enable clockgating to save power */
a1255107 2936 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1112a46b 2937 state);
4a446d55
AD
2938 if (r) {
2939 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 2940 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
2941 return r;
2942 }
b0b00ff1 2943 }
d38ceaf9 2944 }
06b18f61 2945
c9f96fd5
RZ
2946 return 0;
2947}
2948
5d89bb2d
LL
2949int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2950 enum amd_powergating_state state)
c9f96fd5 2951{
1112a46b 2952 int i, j, r;
06b18f61 2953
c9f96fd5
RZ
2954 if (amdgpu_emu_mode == 1)
2955 return 0;
2956
1112a46b
RZ
2957 for (j = 0; j < adev->num_ip_blocks; j++) {
2958 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
a2d31dc3 2959 if (!adev->ip_blocks[i].status.late_initialized)
c9f96fd5 2960 continue;
47198eb7 2961 /* skip PG for GFX, SDMA on S0ix */
5d70a549 2962 if (adev->in_s0ix &&
47198eb7
AD
2963 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2964 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
5d70a549 2965 continue;
c9f96fd5
RZ
2966 /* skip CG for VCE/UVD, it's handled specially */
2967 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2968 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2969 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
52f2e779 2970 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
c9f96fd5
RZ
2971 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2972 /* enable powergating to save power */
2973 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
1112a46b 2974 state);
c9f96fd5
RZ
2975 if (r) {
2976 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2977 adev->ip_blocks[i].version->funcs->name, r);
2978 return r;
2979 }
2980 }
2981 }
2dc80b00
S
2982 return 0;
2983}
2984
beff74bc
AD
2985static int amdgpu_device_enable_mgpu_fan_boost(void)
2986{
2987 struct amdgpu_gpu_instance *gpu_ins;
2988 struct amdgpu_device *adev;
2989 int i, ret = 0;
2990
2991 mutex_lock(&mgpu_info.mutex);
2992
2993 /*
2994 * MGPU fan boost feature should be enabled
2995 * only when there are two or more dGPUs in
2996 * the system
2997 */
2998 if (mgpu_info.num_dgpu < 2)
2999 goto out;
3000
3001 for (i = 0; i < mgpu_info.num_dgpu; i++) {
3002 gpu_ins = &(mgpu_info.gpu_ins[i]);
3003 adev = gpu_ins->adev;
3004 if (!(adev->flags & AMD_IS_APU) &&
f10bb940 3005 !gpu_ins->mgpu_fan_enabled) {
beff74bc
AD
3006 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
3007 if (ret)
3008 break;
3009
3010 gpu_ins->mgpu_fan_enabled = 1;
3011 }
3012 }
3013
3014out:
3015 mutex_unlock(&mgpu_info.mutex);
3016
3017 return ret;
3018}
3019
e3ecdffa
AD
3020/**
3021 * amdgpu_device_ip_late_init - run late init for hardware IPs
3022 *
3023 * @adev: amdgpu_device pointer
3024 *
3025 * Late initialization pass for hardware IPs. The list of all the hardware
3026 * IPs that make up the asic is walked and the late_init callbacks are run.
3027 * late_init covers any special initialization that an IP requires
 3028	 * after all of them have been initialized or something that needs to happen
3029 * late in the init process.
3030 * Returns 0 on success, negative error code on failure.
3031 */
06ec9070 3032static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2dc80b00 3033{
60599a03 3034 struct amdgpu_gpu_instance *gpu_instance;
2dc80b00
S
3035 int i = 0, r;
3036
3037 for (i = 0; i < adev->num_ip_blocks; i++) {
73f847db 3038 if (!adev->ip_blocks[i].status.hw)
2dc80b00
S
3039 continue;
3040 if (adev->ip_blocks[i].version->funcs->late_init) {
3041 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
3042 if (r) {
3043 DRM_ERROR("late_init of IP block <%s> failed %d\n",
3044 adev->ip_blocks[i].version->funcs->name, r);
3045 return r;
3046 }
2dc80b00 3047 }
73f847db 3048 adev->ip_blocks[i].status.late_initialized = true;
2dc80b00
S
3049 }
3050
867e24ca 3051 r = amdgpu_ras_late_init(adev);
3052 if (r) {
3053 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
3054 return r;
3055 }
3056
a891d239
DL
3057 amdgpu_ras_set_error_query_ready(adev, true);
3058
1112a46b
RZ
3059 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
3060 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
916ac57f 3061
06ec9070 3062 amdgpu_device_fill_reset_magic(adev);
d38ceaf9 3063
beff74bc
AD
3064 r = amdgpu_device_enable_mgpu_fan_boost();
3065 if (r)
3066 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
3067
4da8b639 3068 /* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
47fc644f
SS
3069 if (amdgpu_passthrough(adev) &&
3070 ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
3071 adev->asic_type == CHIP_ALDEBARAN))
bc143d8b 3072 amdgpu_dpm_handle_passthrough_sbr(adev, true);
60599a03
EQ
3073
3074 if (adev->gmc.xgmi.num_physical_nodes > 1) {
3075 mutex_lock(&mgpu_info.mutex);
3076
3077 /*
 3078	 * Reset device p-state to low, as it was booted at the high p-state.
3079 *
3080 * This should be performed only after all devices from the same
3081 * hive get initialized.
3082 *
 3083	 * However, the number of devices in the hive is not known in advance,
 3084	 * as it is counted one by one during device initialization.
3085 *
3086 * So, we wait for all XGMI interlinked devices initialized.
3087 * This may bring some delays as those devices may come from
3088 * different hives. But that should be OK.
3089 */
3090 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
3091 for (i = 0; i < mgpu_info.num_gpu; i++) {
3092 gpu_instance = &(mgpu_info.gpu_ins[i]);
3093 if (gpu_instance->adev->flags & AMD_IS_APU)
3094 continue;
3095
d84a430d
JK
3096 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
3097 AMDGPU_XGMI_PSTATE_MIN);
60599a03
EQ
3098 if (r) {
3099 DRM_ERROR("pstate setting failed (%d).\n", r);
3100 break;
3101 }
3102 }
3103 }
3104
3105 mutex_unlock(&mgpu_info.mutex);
3106 }
3107
d38ceaf9
AD
3108 return 0;
3109}
3110
613aa3ea
LY
3111/**
3112 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
3113 *
3114 * @adev: amdgpu_device pointer
3115 *
 3116	 * For ASICs that need to disable the SMC first
3117 */
3118static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
3119{
3120 int i, r;
3121
4e8303cf 3122 if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
613aa3ea
LY
3123 return;
3124
3125 for (i = 0; i < adev->num_ip_blocks; i++) {
3126 if (!adev->ip_blocks[i].status.hw)
3127 continue;
3128 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3129 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3130 /* XXX handle errors */
3131 if (r) {
3132 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3133 adev->ip_blocks[i].version->funcs->name, r);
3134 }
3135 adev->ip_blocks[i].status.hw = false;
3136 break;
3137 }
3138 }
3139}
3140
e9669fb7 3141static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
d38ceaf9
AD
3142{
3143 int i, r;
3144
e9669fb7
AG
3145 for (i = 0; i < adev->num_ip_blocks; i++) {
3146 if (!adev->ip_blocks[i].version->funcs->early_fini)
3147 continue;
5278a159 3148
e9669fb7
AG
3149 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
3150 if (r) {
3151 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
3152 adev->ip_blocks[i].version->funcs->name, r);
3153 }
3154 }
c030f2e4 3155
05df1f01 3156 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
fdd34271
RZ
3157 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3158
7270e895
TY
3159 amdgpu_amdkfd_suspend(adev, false);
3160
613aa3ea
LY
 3161	/* Workaround for ASICs that need to disable the SMC first */
3162 amdgpu_device_smu_fini_early(adev);
3e96dbfd 3163
d38ceaf9 3164 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 3165 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 3166 continue;
8201a67a 3167
a1255107 3168 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 3169 /* XXX handle errors */
2c1a2784 3170 if (r) {
a1255107
AD
3171 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3172 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 3173 }
8201a67a 3174
a1255107 3175 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
3176 }
3177
6effad8a
GC
3178 if (amdgpu_sriov_vf(adev)) {
3179 if (amdgpu_virt_release_full_gpu(adev, false))
3180 DRM_ERROR("failed to release exclusive mode on fini\n");
3181 }
3182
e9669fb7
AG
3183 return 0;
3184}
3185
3186/**
3187 * amdgpu_device_ip_fini - run fini for hardware IPs
3188 *
3189 * @adev: amdgpu_device pointer
3190 *
3191 * Main teardown pass for hardware IPs. The list of all the hardware
3192 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
3193 * are run. hw_fini tears down the hardware associated with each IP
3194 * and sw_fini tears down any software state associated with each IP.
3195 * Returns 0 on success, negative error code on failure.
3196 */
3197static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
3198{
3199 int i, r;
3200
3201 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
3202 amdgpu_virt_release_ras_err_handler_data(adev);
3203
e9669fb7
AG
3204 if (adev->gmc.xgmi.num_physical_nodes > 1)
3205 amdgpu_xgmi_remove_device(adev);
3206
c004d44e 3207 amdgpu_amdkfd_device_fini_sw(adev);
9950cda2 3208
d38ceaf9 3209 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 3210 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 3211 continue;
c12aba3a
ML
3212
3213 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
c8963ea4 3214 amdgpu_ucode_free_bo(adev);
1e256e27 3215 amdgpu_free_static_csa(&adev->virt.csa_obj);
c12aba3a 3216 amdgpu_device_wb_fini(adev);
7ccfd79f 3217 amdgpu_device_mem_scratch_fini(adev);
533aed27 3218 amdgpu_ib_pool_fini(adev);
c8031019 3219 amdgpu_seq64_fini(adev);
c12aba3a
ML
3220 }
3221
a1255107 3222 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 3223 /* XXX handle errors */
2c1a2784 3224 if (r) {
a1255107
AD
3225 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
3226 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 3227 }
a1255107
AD
3228 adev->ip_blocks[i].status.sw = false;
3229 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
3230 }
3231
a6dcfd9c 3232 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 3233 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 3234 continue;
a1255107
AD
3235 if (adev->ip_blocks[i].version->funcs->late_fini)
3236 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
3237 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
3238 }
3239
c030f2e4 3240 amdgpu_ras_fini(adev);
3241
d38ceaf9
AD
3242 return 0;
3243}
3244
e3ecdffa 3245/**
beff74bc 3246 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
e3ecdffa 3247 *
1112a46b 3248 * @work: work_struct.
e3ecdffa 3249 */
beff74bc 3250static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2dc80b00
S
3251{
3252 struct amdgpu_device *adev =
beff74bc 3253 container_of(work, struct amdgpu_device, delayed_init_work.work);
916ac57f
RZ
3254 int r;
3255
3256 r = amdgpu_ib_ring_tests(adev);
3257 if (r)
3258 DRM_ERROR("ib ring test failed (%d).\n", r);
2dc80b00
S
3259}
3260
1e317b99
RZ
3261static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
3262{
3263 struct amdgpu_device *adev =
3264 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
3265
90a92662
MD
3266 WARN_ON_ONCE(adev->gfx.gfx_off_state);
3267 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
3268
3269 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
3270 adev->gfx.gfx_off_state = true;
1e317b99
RZ
3271}
3272
e3ecdffa 3273/**
e7854a03 3274 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
e3ecdffa
AD
3275 *
3276 * @adev: amdgpu_device pointer
3277 *
3278 * Main suspend function for hardware IPs. The list of all the hardware
3279 * IPs that make up the asic is walked, clockgating is disabled and the
3280 * suspend callbacks are run. suspend puts the hardware and software state
3281 * in each IP into a state suitable for suspend.
3282 * Returns 0 on success, negative error code on failure.
3283 */
e7854a03
AD
3284static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
3285{
3286 int i, r;
3287
50ec83f0
AD
3288 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3289 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
05df1f01 3290
b31d6ada
EQ
3291 /*
3292 * Per PMFW team's suggestion, driver needs to handle gfxoff
3293 * and df cstate features disablement for gpu reset(e.g. Mode1Reset)
3294 * scenario. Add the missing df cstate disablement here.
3295 */
3296 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
3297 dev_warn(adev->dev, "Failed to disallow df cstate");
3298
e7854a03
AD
3299 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3300 if (!adev->ip_blocks[i].status.valid)
3301 continue;
2b9f7848 3302
e7854a03 3303 /* displays are handled separately */
2b9f7848
ND
3304 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
3305 continue;
3306
3307 /* XXX handle errors */
3308 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3309 /* XXX handle errors */
3310 if (r) {
3311 DRM_ERROR("suspend of IP block <%s> failed %d\n",
3312 adev->ip_blocks[i].version->funcs->name, r);
3313 return r;
e7854a03 3314 }
2b9f7848
ND
3315
3316 adev->ip_blocks[i].status.hw = false;
e7854a03
AD
3317 }
3318
e7854a03
AD
3319 return 0;
3320}
3321
3322/**
3323 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3324 *
3325 * @adev: amdgpu_device pointer
3326 *
3327 * Main suspend function for hardware IPs. The list of all the hardware
3328 * IPs that make up the asic is walked, clockgating is disabled and the
3329 * suspend callbacks are run. suspend puts the hardware and software state
3330 * in each IP into a state suitable for suspend.
3331 * Returns 0 on success, negative error code on failure.
3332 */
3333static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
3334{
3335 int i, r;
3336
557f42a2 3337 if (adev->in_s0ix)
bc143d8b 3338 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
34416931 3339
d38ceaf9 3340 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 3341 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 3342 continue;
e7854a03
AD
3343 /* displays are handled in phase1 */
3344 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3345 continue;
bff77e86
LM
3346 /* PSP lost connection when err_event_athub occurs */
3347 if (amdgpu_ras_intr_triggered() &&
3348 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3349 adev->ip_blocks[i].status.hw = false;
3350 continue;
3351 }
e3c1b071 3352
3353 /* skip unnecessary suspend if we do not initialize them yet */
3354 if (adev->gmc.xgmi.pending_reset &&
3355 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3356 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3357 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3358 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3359 adev->ip_blocks[i].status.hw = false;
3360 continue;
3361 }
557f42a2 3362
afa6646b 3363 /* skip suspend of gfx/mes and psp for S0ix
32ff160d
AD
3364 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3365 * like at runtime. PSP is also part of the always on hardware
3366 * so no need to suspend it.
3367 */
557f42a2 3368 if (adev->in_s0ix &&
32ff160d 3369 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
afa6646b
AD
3370 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3371 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
557f42a2
AD
3372 continue;
3373
2a7798ea
AD
3374 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3375 if (adev->in_s0ix &&
4e8303cf
LL
3376 (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3377 IP_VERSION(5, 0, 0)) &&
3378 (adev->ip_blocks[i].version->type ==
3379 AMD_IP_BLOCK_TYPE_SDMA))
2a7798ea
AD
3380 continue;
3381
e11c7750
TH
3382		/* PSP provides the IMU and RLC FW binaries to TOS during cold boot.
3383		 * These live in TMR, so PSP-TOS is expected to reuse and reload them
3384		 * from that location, and RLC autoload is likewise triggered from
3385		 * there by the PMFW -> PSP message during the re-init sequence.
3386		 * Therefore, skip psp suspend & resume to avoid destroying the TMR
3387		 * and reloading the FWs again for IMU-enabled APU ASICs.
3388		 */
3389 if (amdgpu_in_reset(adev) &&
3390 (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3391 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3392 continue;
3393
d38ceaf9 3394 /* XXX handle errors */
a1255107 3395 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 3396 /* XXX handle errors */
2c1a2784 3397 if (r) {
a1255107
AD
3398 DRM_ERROR("suspend of IP block <%s> failed %d\n",
3399 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 3400 }
876923fb 3401 adev->ip_blocks[i].status.hw = false;
a3a09142 3402 /* handle putting the SMC in the appropriate state */
47fc644f 3403 if (!amdgpu_sriov_vf(adev)) {
86b93fd6
JZ
3404 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3405 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3406 if (r) {
3407 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3408 adev->mp1_state, r);
3409 return r;
3410 }
a3a09142
AD
3411 }
3412 }
d38ceaf9
AD
3413 }
3414
3415 return 0;
3416}
3417
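A minimal sketch (an editorial illustration, not driver code) of the reverse walk
both suspend phases share; the filter() hook is hypothetical and stands in for the
per-phase skip checks above, and note that phase1 bails out on a suspend error
while phase2 only logs it and continues:

static int ip_suspend_walk(struct amdgpu_device *adev,
			   bool (*filter)(struct amdgpu_ip_block *block))
{
	int i, r;

	/* Tear down in reverse init order so dependent IPs go down first. */
	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		struct amdgpu_ip_block *block = &adev->ip_blocks[i];

		if (!block->status.valid || !filter(block))
			continue;
		r = block->version->funcs->suspend(adev);
		if (r)
			return r;
		block->status.hw = false;
	}
	return 0;
}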
e7854a03
AD
3418/**
3419 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3420 *
3421 * @adev: amdgpu_device pointer
3422 *
3423 * Main suspend function for hardware IPs. The list of all the hardware
3424 * IPs that make up the asic is walked, clockgating is disabled and the
3425 * suspend callbacks are run. suspend puts the hardware and software state
3426 * in each IP into a state suitable for suspend.
3427 * Returns 0 on success, negative error code on failure.
3428 */
3429int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3430{
3431 int r;
3432
3c73683c
JC
3433 if (amdgpu_sriov_vf(adev)) {
3434 amdgpu_virt_fini_data_exchange(adev);
e7819644 3435 amdgpu_virt_request_full_gpu(adev, false);
3c73683c 3436 }
e7819644 3437
b7043800
AD
3438 amdgpu_ttm_set_buffer_funcs_status(adev, false);
3439
e7854a03
AD
3440 r = amdgpu_device_ip_suspend_phase1(adev);
3441 if (r)
3442 return r;
3443 r = amdgpu_device_ip_suspend_phase2(adev);
3444
e7819644
YT
3445 if (amdgpu_sriov_vf(adev))
3446 amdgpu_virt_release_full_gpu(adev, false);
3447
e7854a03
AD
3448 return r;
3449}
3450
06ec9070 3451static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
3452{
3453 int i, r;
3454
2cb681b6 3455 static enum amd_ip_block_type ip_order[] = {
2cb681b6 3456 AMD_IP_BLOCK_TYPE_COMMON,
c1c39032 3457 AMD_IP_BLOCK_TYPE_GMC,
39186aef 3458 AMD_IP_BLOCK_TYPE_PSP,
2cb681b6
ML
3459 AMD_IP_BLOCK_TYPE_IH,
3460 };
a90ad3c2 3461
95ea3dbc 3462 for (i = 0; i < adev->num_ip_blocks; i++) {
2cb681b6
ML
3463 int j;
3464 struct amdgpu_ip_block *block;
a90ad3c2 3465
4cd2a96d
J
3466 block = &adev->ip_blocks[i];
3467 block->status.hw = false;
2cb681b6 3468
4cd2a96d 3469 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2cb681b6 3470
4cd2a96d 3471 if (block->version->type != ip_order[j] ||
2cb681b6
ML
3472 !block->status.valid)
3473 continue;
3474
3475 r = block->version->funcs->hw_init(adev);
0aaeefcc 3476			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
c41d1cf6
ML
3477 if (r)
3478 return r;
482f0e53 3479 block->status.hw = true;
a90ad3c2
ML
3480 }
3481 }
3482
3483 return 0;
3484}
3485
06ec9070 3486static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
3487{
3488 int i, r;
3489
2cb681b6
ML
3490 static enum amd_ip_block_type ip_order[] = {
3491 AMD_IP_BLOCK_TYPE_SMC,
3492 AMD_IP_BLOCK_TYPE_DCE,
3493 AMD_IP_BLOCK_TYPE_GFX,
3494 AMD_IP_BLOCK_TYPE_SDMA,
ec64350d 3495 AMD_IP_BLOCK_TYPE_MES,
257deb8c 3496 AMD_IP_BLOCK_TYPE_UVD,
d83c7a07 3497 AMD_IP_BLOCK_TYPE_VCE,
d2cdc014
YZ
3498 AMD_IP_BLOCK_TYPE_VCN,
3499 AMD_IP_BLOCK_TYPE_JPEG
2cb681b6 3500 };
a90ad3c2 3501
2cb681b6
ML
3502 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3503 int j;
3504 struct amdgpu_ip_block *block;
a90ad3c2 3505
2cb681b6
ML
3506 for (j = 0; j < adev->num_ip_blocks; j++) {
3507 block = &adev->ip_blocks[j];
3508
3509 if (block->version->type != ip_order[i] ||
482f0e53
ML
3510 !block->status.valid ||
3511 block->status.hw)
2cb681b6
ML
3512 continue;
3513
895bd048
JZ
3514 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3515 r = block->version->funcs->resume(adev);
3516 else
3517 r = block->version->funcs->hw_init(adev);
3518
0aaeefcc 3519			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
c41d1cf6
ML
3520 if (r)
3521 return r;
482f0e53 3522 block->status.hw = true;
a90ad3c2
ML
3523 }
3524 }
3525
3526 return 0;
3527}
3528
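/* Note the asymmetry between the two SR-IOV re-init walks above: the early
 * pass keeps the adev->ip_blocks[] order in its outer loop, while the late
 * pass iterates ip_order[] in the outer loop so that every block of one
 * type is brought up before the next type is started. SMC is resume()d
 * rather than hw_init()ed, presumably because the SMU retains its state
 * across a VF FLR.
 */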
e3ecdffa
AD
3529/**
3530 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3531 *
3532 * @adev: amdgpu_device pointer
3533 *
3534 * First resume function for hardware IPs. The list of all the hardware
3535 * IPs that make up the asic is walked and the resume callbacks are run for
3536 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3537 * after a suspend and updates the software state as necessary. This
3538 * function is also used for restoring the GPU after a GPU reset.
3539 * Returns 0 on success, negative error code on failure.
3540 */
06ec9070 3541static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
d38ceaf9
AD
3542{
3543 int i, r;
3544
a90ad3c2 3545 for (i = 0; i < adev->num_ip_blocks; i++) {
482f0e53 3546 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
a90ad3c2 3547 continue;
a90ad3c2 3548 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
e3ecdffa 3549 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
d7274ec7
BZ
3550 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3551 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
482f0e53 3552
fcf0649f
CZ
3553 r = adev->ip_blocks[i].version->funcs->resume(adev);
3554 if (r) {
3555 DRM_ERROR("resume of IP block <%s> failed %d\n",
3556 adev->ip_blocks[i].version->funcs->name, r);
3557 return r;
3558 }
482f0e53 3559 adev->ip_blocks[i].status.hw = true;
a90ad3c2
ML
3560 }
3561 }
3562
3563 return 0;
3564}
3565
e3ecdffa
AD
3566/**
3567 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3568 *
3569 * @adev: amdgpu_device pointer
3570 *
3571 * Second resume function for hardware IPs. The list of all the hardware
3572 * IPs that make up the asic is walked and the resume callbacks are run for
3573 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
3574 * functional state after a suspend and updates the software state as
3575 * necessary. This function is also used for restoring the GPU after a GPU
3576 * reset.
3577 * Returns 0 on success, negative error code on failure.
3578 */
06ec9070 3579static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
3580{
3581 int i, r;
3582
3583 for (i = 0; i < adev->num_ip_blocks; i++) {
482f0e53 3584 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
d38ceaf9 3585 continue;
fcf0649f 3586 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
e3ecdffa 3587 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
7a3e0bb2
RZ
3588 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3589 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
fcf0649f 3590 continue;
a1255107 3591 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 3592 if (r) {
a1255107
AD
3593 DRM_ERROR("resume of IP block <%s> failed %d\n",
3594 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 3595 return r;
2c1a2784 3596 }
482f0e53 3597 adev->ip_blocks[i].status.hw = true;
d38ceaf9
AD
3598 }
3599
3600 return 0;
3601}
3602
e3ecdffa
AD
3603/**
3604 * amdgpu_device_ip_resume - run resume for hardware IPs
3605 *
3606 * @adev: amdgpu_device pointer
3607 *
3608 * Main resume function for hardware IPs. The hardware IPs
3609 * are split into two resume functions because they are
b8920e1e 3610 * also used in recovering from a GPU reset and some additional
e3ecdffa
AD
3611 * steps need to be taken between them. In this case (S3/S4) they are
3612 * run sequentially.
3613 * Returns 0 on success, negative error code on failure.
3614 */
06ec9070 3615static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
fcf0649f
CZ
3616{
3617 int r;
3618
06ec9070 3619 r = amdgpu_device_ip_resume_phase1(adev);
fcf0649f
CZ
3620 if (r)
3621 return r;
7a3e0bb2
RZ
3622
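	/* Microcode is (re)loaded between the two phases: phase1 brings up
	 * COMMON/GMC/IH (and PSP on a VF) so firmware can be staged, while
	 * the phase2 blocks (GFX, SDMA, ...) expect their FW to be present.
	 */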
3623 r = amdgpu_device_fw_loading(adev);
3624 if (r)
3625 return r;
3626
06ec9070 3627 r = amdgpu_device_ip_resume_phase2(adev);
fcf0649f 3628
b7043800
AD
3629 if (adev->mman.buffer_funcs_ring->sched.ready)
3630 amdgpu_ttm_set_buffer_funcs_status(adev, true);
3631
fcf0649f
CZ
3632 return r;
3633}
3634
e3ecdffa
AD
3635/**
3636 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3637 *
3638 * @adev: amdgpu_device pointer
3639 *
3640 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3641 */
4e99a44e 3642static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 3643{
6867e1b5
ML
3644 if (amdgpu_sriov_vf(adev)) {
3645 if (adev->is_atom_fw) {
58ff791a 3646 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
6867e1b5
ML
3647 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3648 } else {
3649 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3650 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3651 }
3652
3653 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3654 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 3655 }
048765ad
AR
3656}
3657
e3ecdffa
AD
3658/**
3659 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3660 *
3661 * @asic_type: AMD asic type
3662 *
3663 * Check if there is DC (new modesetting infrastructure) support for an asic.
3664 * Returns true if DC has support, false if not.
3665 */
4562236b
HW
3666bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3667{
3668 switch (asic_type) {
0637d417
AD
3669#ifdef CONFIG_DRM_AMDGPU_SI
3670 case CHIP_HAINAN:
3671#endif
3672 case CHIP_TOPAZ:
3673 /* chips with no display hardware */
3674 return false;
4562236b 3675#if defined(CONFIG_DRM_AMD_DC)
64200c46
MR
3676 case CHIP_TAHITI:
3677 case CHIP_PITCAIRN:
3678 case CHIP_VERDE:
3679 case CHIP_OLAND:
2d32ffd6
AD
3680 /*
3681 * We have systems in the wild with these ASICs that require
3682 * LVDS and VGA support which is not supported with DC.
3683 *
3684 * Fallback to the non-DC driver here by default so as not to
3685 * cause regressions.
3686 */
3687#if defined(CONFIG_DRM_AMD_DC_SI)
3688 return amdgpu_dc > 0;
3689#else
3690 return false;
64200c46 3691#endif
4562236b 3692 case CHIP_BONAIRE:
0d6fbccb 3693 case CHIP_KAVERI:
367e6687
AD
3694 case CHIP_KABINI:
3695 case CHIP_MULLINS:
d9fda248
HW
3696 /*
3697 * We have systems in the wild with these ASICs that require
b5a0168e 3698 * VGA support which is not supported with DC.
d9fda248
HW
3699 *
3700 * Fallback to the non-DC driver here by default so as not to
3701 * cause regressions.
3702 */
3703 return amdgpu_dc > 0;
f7f12b25 3704 default:
fd187853 3705 return amdgpu_dc != 0;
f7f12b25 3706#else
4562236b 3707 default:
93b09a9a 3708 if (amdgpu_dc > 0)
b8920e1e 3709 DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
4562236b 3710 return false;
f7f12b25 3711#endif
4562236b
HW
3712 }
3713}
3714
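/* In terms of the amdgpu.dc module parameter: dc=1 forces DC on where the
 * ASIC can support it, dc=0 forces it off, and the default (-1) takes the
 * per-ASIC fallback encoded above (amdgpu_dc > 0 vs. amdgpu_dc != 0).
 */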
3715/**
3716 * amdgpu_device_has_dc_support - check if dc is supported
3717 *
982a820b 3718 * @adev: amdgpu_device pointer
4562236b
HW
3719 *
3720 * Returns true for supported, false for not supported
3721 */
3722bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3723{
25263da3 3724 if (adev->enable_virtual_display ||
abaf210c 3725 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
2555039d
XY
3726 return false;
3727
4562236b
HW
3728 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3729}
3730
d4535e2c
AG
3731static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3732{
3733 struct amdgpu_device *adev =
3734 container_of(__work, struct amdgpu_device, xgmi_reset_work);
d95e8e97 3735 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
d4535e2c 3736
c6a6e2db
AG
3737 /* It's a bug to not have a hive within this function */
3738 if (WARN_ON(!hive))
3739 return;
3740
3741 /*
3742 * Use task barrier to synchronize all xgmi reset works across the
3743 * hive. task_barrier_enter and task_barrier_exit will block
3744 * until all the threads running the xgmi reset works reach
3745 * those points. task_barrier_full will do both blocks.
3746 */
3747 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3748
3749 task_barrier_enter(&hive->tb);
4a580877 3750 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
c6a6e2db
AG
3751
3752 if (adev->asic_reset_res)
3753 goto fail;
3754
3755 task_barrier_exit(&hive->tb);
4a580877 3756 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
c6a6e2db
AG
3757
3758 if (adev->asic_reset_res)
3759 goto fail;
43c4d576 3760
21226f02 3761 amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
c6a6e2db
AG
3762 } else {
3763
3764 task_barrier_full(&hive->tb);
3765 adev->asic_reset_res = amdgpu_asic_reset(adev);
3766 }
ce316fa5 3767
c6a6e2db 3768fail:
d4535e2c 3769 if (adev->asic_reset_res)
fed184e9 3770		DRM_WARN("ASIC reset failed with error %d for drm dev %s",
4a580877 3771 adev->asic_reset_res, adev_to_drm(adev)->unique);
d95e8e97 3772 amdgpu_put_xgmi_hive(hive);
d4535e2c
AG
3773}
3774
71f98027
AD
3775static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3776{
3777 char *input = amdgpu_lockup_timeout;
3778 char *timeout_setting = NULL;
3779 int index = 0;
3780 long timeout;
3781 int ret = 0;
3782
3783 /*
67387dfe
AD
3784	 * By default the timeout for non-compute jobs is 10000 ms
3785	 * and 60000 ms for compute jobs.
71f98027 3786	 * In SR-IOV or passthrough mode, the timeout for compute
b7b2a316 3787	 * jobs is 60000 ms by default.
71f98027
AD
3788 */
3789 adev->gfx_timeout = msecs_to_jiffies(10000);
3790 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
9882e278
ED
3791 if (amdgpu_sriov_vf(adev))
3792 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3793 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
71f98027 3794 else
67387dfe 3795 adev->compute_timeout = msecs_to_jiffies(60000);
71f98027 3796
f440ff44 3797 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
71f98027 3798 while ((timeout_setting = strsep(&input, ",")) &&
f440ff44 3799 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
71f98027
AD
3800 ret = kstrtol(timeout_setting, 0, &timeout);
3801 if (ret)
3802 return ret;
3803
3804 if (timeout == 0) {
3805 index++;
3806 continue;
3807 } else if (timeout < 0) {
3808 timeout = MAX_SCHEDULE_TIMEOUT;
127aedf9
CK
3809 dev_warn(adev->dev, "lockup timeout disabled");
3810 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
71f98027
AD
3811 } else {
3812 timeout = msecs_to_jiffies(timeout);
3813 }
3814
3815 switch (index++) {
3816 case 0:
3817 adev->gfx_timeout = timeout;
3818 break;
3819 case 1:
3820 adev->compute_timeout = timeout;
3821 break;
3822 case 2:
3823 adev->sdma_timeout = timeout;
3824 break;
3825 case 3:
3826 adev->video_timeout = timeout;
3827 break;
3828 default:
3829 break;
3830 }
3831 }
3832 /*
3833 * There is only one value specified and
3834 * it should apply to all non-compute jobs.
3835 */
bcccee89 3836 if (index == 1) {
71f98027 3837 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
bcccee89
ED
3838 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3839 adev->compute_timeout = adev->gfx_timeout;
3840 }
71f98027
AD
3841 }
3842
3843 return ret;
3844}
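
A worked illustration of the parsing above (values in milliseconds; the
parameter string is an assumed example, not quoted from the module docs):

/*
 *   amdgpu.lockup_timeout=10000,60000,-1,5000 yields:
 *     gfx_timeout     = 10 s
 *     compute_timeout = 60 s
 *     sdma_timeout    = MAX_SCHEDULE_TIMEOUT (disabled; taints the kernel)
 *     video_timeout   = 5 s
 *   A "0" entry keeps the corresponding default. A single value, e.g.
 *   amdgpu.lockup_timeout=30000, applies to all non-compute queues, and
 *   under SR-IOV or passthrough to the compute queue as well.
 */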
d4535e2c 3845
4a74c38c
PY
3846/**
3847 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3848 *
3849 * @adev: amdgpu_device pointer
3850 *
3851 * RAM is direct-mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3852 */
3853static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3854{
3855 struct iommu_domain *domain;
3856
3857 domain = iommu_get_domain_for_dev(adev->dev);
3858 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3859 adev->ram_is_direct_mapped = true;
3860}
3861
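/* e.g. booting without an IOMMU, or with an identity-mapped passthrough
 * domain (iommu=pt), means GPU DMA addresses equal physical addresses.
 */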
77f3a5cd 3862static const struct attribute *amdgpu_dev_attributes[] = {
77f3a5cd
ND
3863 &dev_attr_pcie_replay_count.attr,
3864 NULL
3865};
3866
02ff519e
AD
3867static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3868{
3869 if (amdgpu_mcbp == 1)
3870 adev->gfx.mcbp = true;
1e9e15dc
JZ
3871 else if (amdgpu_mcbp == 0)
3872 adev->gfx.mcbp = false;
50a7c876 3873
02ff519e
AD
3874 if (amdgpu_sriov_vf(adev))
3875 adev->gfx.mcbp = true;
3876
3877 if (adev->gfx.mcbp)
3878 DRM_INFO("MCBP is enabled\n");
3879}
3880
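/* Net effect of the checks above:
 *   amdgpu.mcbp=1          -> MCBP on
 *   amdgpu.mcbp=0          -> MCBP off, unless running as an SR-IOV VF
 *   amdgpu.mcbp=-1 (auto)  -> per-ASIC default, forced on for SR-IOV VFs
 */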
d38ceaf9
AD
3881/**
3882 * amdgpu_device_init - initialize the driver
3883 *
3884 * @adev: amdgpu_device pointer
d38ceaf9
AD
3885 * @flags: driver flags
3886 *
3887 * Initializes the driver info and hw (all asics).
3888 * Returns 0 for success or an error on failure.
3889 * Called at driver startup.
3890 */
3891int amdgpu_device_init(struct amdgpu_device *adev,
d38ceaf9
AD
3892 uint32_t flags)
3893{
8aba21b7
LT
3894 struct drm_device *ddev = adev_to_drm(adev);
3895 struct pci_dev *pdev = adev->pdev;
d38ceaf9 3896 int r, i;
b98c6299 3897 bool px = false;
95844d20 3898 u32 max_MBps;
59e9fff1 3899 int tmp;
d38ceaf9
AD
3900
3901 adev->shutdown = false;
d38ceaf9 3902 adev->flags = flags;
4e66d7d2
YZ
3903
3904 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3905 adev->asic_type = amdgpu_force_asic_type;
3906 else
3907 adev->asic_type = flags & AMD_ASIC_MASK;
3908
d38ceaf9 3909 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
593aa2d2 3910 if (amdgpu_emu_mode == 1)
8bdab6bb 3911 adev->usec_timeout *= 10;
770d13b1 3912 adev->gmc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
3913 adev->accel_working = false;
3914 adev->num_rings = 0;
68ce8b24 3915 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
d38ceaf9
AD
3916 adev->mman.buffer_funcs = NULL;
3917 adev->mman.buffer_funcs_ring = NULL;
3918 adev->vm_manager.vm_pte_funcs = NULL;
0c88b430 3919 adev->vm_manager.vm_pte_num_scheds = 0;
132f34e4 3920 adev->gmc.gmc_funcs = NULL;
7bd939d0 3921 adev->harvest_ip_mask = 0x0;
f54d1867 3922 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 3923 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
3924
3925 adev->smc_rreg = &amdgpu_invalid_rreg;
3926 adev->smc_wreg = &amdgpu_invalid_wreg;
3927 adev->pcie_rreg = &amdgpu_invalid_rreg;
3928 adev->pcie_wreg = &amdgpu_invalid_wreg;
0c552ed3
LM
3929 adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
3930 adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
36b9a952
HR
3931 adev->pciep_rreg = &amdgpu_invalid_rreg;
3932 adev->pciep_wreg = &amdgpu_invalid_wreg;
4fa1c6a6
TZ
3933 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3934 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
a76b2870
CL
3935 adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
3936 adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
d38ceaf9
AD
3937 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3938 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3939 adev->didt_rreg = &amdgpu_invalid_rreg;
3940 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
3941 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3942 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
3943 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3944 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3945
3e39ab90
AD
3946 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3947 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3948 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
3949
3950	/* mutex initializations are all done here so we
b8920e1e
SS
3951	 * can call these functions again without locking issues
3952	 */
0e5ca0d1 3953 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
3954 mutex_init(&adev->pm.mutex);
3955 mutex_init(&adev->gfx.gpu_clock_mutex);
3956 mutex_init(&adev->srbm_mutex);
b8866c26 3957 mutex_init(&adev->gfx.pipe_reserve_mutex);
d23ee13f 3958 mutex_init(&adev->gfx.gfx_off_mutex);
98a54e88 3959 mutex_init(&adev->gfx.partition_mutex);
d38ceaf9 3960 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 3961 mutex_init(&adev->mn_lock);
e23b74aa 3962 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9 3963 hash_init(adev->mn_hash);
32eaeae0 3964 mutex_init(&adev->psp.mutex);
bd052211 3965 mutex_init(&adev->notifier_lock);
8cda7a4f 3966 mutex_init(&adev->pm.stable_pstate_ctx_lock);
f113cc32 3967 mutex_init(&adev->benchmark_mutex);
d38ceaf9 3968
ab3b9de6 3969 amdgpu_device_init_apu_flags(adev);
9f6a7857 3970
912dfc84
EQ
3971 r = amdgpu_device_check_arguments(adev);
3972 if (r)
3973 return r;
d38ceaf9 3974
d38ceaf9
AD
3975 spin_lock_init(&adev->mmio_idx_lock);
3976 spin_lock_init(&adev->smc_idx_lock);
3977 spin_lock_init(&adev->pcie_idx_lock);
3978 spin_lock_init(&adev->uvd_ctx_idx_lock);
3979 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 3980 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 3981 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 3982 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 3983 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 3984
0c4e7fa5
CZ
3985 INIT_LIST_HEAD(&adev->shadow_list);
3986 mutex_init(&adev->shadow_list_lock);
3987
655ce9cb 3988 INIT_LIST_HEAD(&adev->reset_list);
3989
6492e1b0 3990 INIT_LIST_HEAD(&adev->ras_list);
3991
3e38b634
EQ
3992 INIT_LIST_HEAD(&adev->pm.od_kobj_list);
3993
beff74bc
AD
3994 INIT_DELAYED_WORK(&adev->delayed_init_work,
3995 amdgpu_device_delayed_init_work_handler);
1e317b99
RZ
3996 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3997 amdgpu_device_delay_enable_gfx_off);
2dc80b00 3998
d4535e2c
AG
3999 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
4000
d23ee13f 4001 adev->gfx.gfx_off_req_count = 1;
0ad7347a
AA
4002 adev->gfx.gfx_off_residency = 0;
4003 adev->gfx.gfx_off_entrycount = 0;
b6e79d9a 4004 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
b1ddf548 4005
b265bdbd
EQ
4006 atomic_set(&adev->throttling_logging_enabled, 1);
4007 /*
4008 * If throttling continues, logging will be performed every minute
4009 * to avoid log flooding. "-1" is subtracted since the thermal
4010 * throttling interrupt comes every second. Thus, the total logging
4011	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
4012	 * for the throttling interrupt) = 60 seconds.
4013 */
4014 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
4015 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
4016
0fa49558
AX
4017 /* Registers mapping */
4018 /* TODO: block userspace mapping of io register */
da69c161
KW
4019 if (adev->asic_type >= CHIP_BONAIRE) {
4020 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
4021 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
4022 } else {
4023 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
4024 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
4025 }
d38ceaf9 4026
6c08e0ef
EQ
4027 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
4028 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
4029
d38ceaf9 4030 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
b8920e1e 4031 if (!adev->rmmio)
d38ceaf9 4032 return -ENOMEM;
b8920e1e 4033
d38ceaf9 4034 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
b8920e1e 4035 DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
d38ceaf9 4036
436afdfa
PY
4037 /*
4038	 * The reset domain needs to be present early, before the XGMI hive is
4039	 * discovered (if any) and initialized, so the reset sem and in_gpu_reset
4040	 * flag can be used early on during init and before any call to RREG32.
4041 */
4042 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
4043 if (!adev->reset_domain)
4044 return -ENOMEM;
4045
3aa0115d
ML
4046 /* detect hw virtualization here */
4047 amdgpu_detect_virtualization(adev);
4048
04e85958
TL
4049 amdgpu_device_get_pcie_info(adev);
4050
f5e4cc84
YW
4051 r = amdgpu_aca_init(adev);
4052 if (r)
4053 return r;
4054
dffa11b4
ML
4055 r = amdgpu_device_get_job_timeout_settings(adev);
4056 if (r) {
4057 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
4ef87d8f 4058 return r;
a190d1c7
XY
4059 }
4060
d38ceaf9 4061 /* early init functions */
06ec9070 4062 r = amdgpu_device_ip_early_init(adev);
d38ceaf9 4063 if (r)
4ef87d8f 4064 return r;
d38ceaf9 4065
02ff519e
AD
4066 amdgpu_device_set_mcbp(adev);
4067
b7cdb41e
ML
4068 /* Get rid of things like offb */
4069 r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
4070 if (r)
4071 return r;
4072
4d33e704
SK
4073 /* Enable TMZ based on IP_VERSION */
4074 amdgpu_gmc_tmz_set(adev);
4075
957b0787 4076 amdgpu_gmc_noretry_set(adev);
4a0165f0
VS
4077	/* Need to get xgmi info early to decide the reset behavior */
4078 if (adev->gmc.xgmi.supported) {
4079 r = adev->gfxhub.funcs->get_xgmi_info(adev);
4080 if (r)
4081 return r;
4082 }
4083
8e6d0b69 4084 /* enable PCIE atomic ops */
b4520bfd
GW
4085 if (amdgpu_sriov_vf(adev)) {
4086 if (adev->virt.fw_reserve.p_pf2vf)
4087 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
4088 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
4089 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
0e768043
YZ
4090	/* APUs with gfx9 onwards don't rely on PCIe atomics; rather, their
4091	 * internal path natively supports atomics, so set have_atomics_support to true.
4092 */
b4520bfd 4093 } else if ((adev->flags & AMD_IS_APU) &&
4e8303cf
LL
4094 (amdgpu_ip_version(adev, GC_HWIP, 0) >
4095 IP_VERSION(9, 0, 0))) {
0e768043 4096 adev->have_atomics_support = true;
b4520bfd 4097 } else {
8e6d0b69 4098 adev->have_atomics_support =
4099 !pci_enable_atomic_ops_to_root(adev->pdev,
4100 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
4101 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
b4520bfd
GW
4102 }
4103
8e6d0b69 4104 if (!adev->have_atomics_support)
4105		dev_info(adev->dev, "PCIE atomic ops are not supported\n");
4106
6585661d 4107	/* doorbell bar mapping and doorbell index init */
43c064db 4108 amdgpu_doorbell_init(adev);
6585661d 4109
9475a943
SL
4110 if (amdgpu_emu_mode == 1) {
4111 /* post the asic on emulation mode */
4112 emu_soc_asic_init(adev);
bfca0289 4113 goto fence_driver_init;
9475a943 4114 }
bfca0289 4115
04442bf7
LL
4116 amdgpu_reset_init(adev);
4117
4e99a44e 4118 /* detect if we are with an SRIOV vbios */
b4520bfd
GW
4119 if (adev->bios)
4120 amdgpu_device_detect_sriov_bios(adev);
048765ad 4121
95e8e59e
AD
4122 /* check if we need to reset the asic
4123 * E.g., driver was not cleanly unloaded previously, etc.
4124 */
f14899fd 4125 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
e3c1b071 4126 if (adev->gmc.xgmi.num_physical_nodes) {
4127 dev_info(adev->dev, "Pending hive reset.\n");
4128 adev->gmc.xgmi.pending_reset = true;
4129			/* Only init the blocks necessary for SMU to handle the reset */
4130 for (i = 0; i < adev->num_ip_blocks; i++) {
4131 if (!adev->ip_blocks[i].status.valid)
4132 continue;
4133 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
4134 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
4135 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
4136 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
751f43e7 4137 DRM_DEBUG("IP %s disabled for hw_init.\n",
e3c1b071 4138 adev->ip_blocks[i].version->funcs->name);
4139 adev->ip_blocks[i].status.hw = true;
4140 }
4141 }
4142 } else {
5f38ac54
KF
4143 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
4144 case IP_VERSION(13, 0, 0):
4145 case IP_VERSION(13, 0, 7):
4146 case IP_VERSION(13, 0, 10):
4147 r = psp_gpu_reset(adev);
4148 break;
4149 default:
4150 tmp = amdgpu_reset_method;
4151 /* It should do a default reset when loading or reloading the driver,
4152 * regardless of the module parameter reset_method.
4153 */
4154 amdgpu_reset_method = AMD_RESET_METHOD_NONE;
4155 r = amdgpu_asic_reset(adev);
4156 amdgpu_reset_method = tmp;
4157 break;
4158 }
4159
e3c1b071 4160 if (r) {
4161 dev_err(adev->dev, "asic reset on init failed\n");
4162 goto failed;
4163 }
95e8e59e
AD
4164 }
4165 }
4166
d38ceaf9 4167 /* Post card if necessary */
39c640c0 4168 if (amdgpu_device_need_post(adev)) {
d38ceaf9 4169 if (!adev->bios) {
bec86378 4170 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
4171 r = -EINVAL;
4172 goto failed;
d38ceaf9 4173 }
bec86378 4174 DRM_INFO("GPU posting now...\n");
4d2997ab 4175 r = amdgpu_device_asic_init(adev);
4e99a44e
ML
4176 if (r) {
4177 dev_err(adev->dev, "gpu post error!\n");
4178 goto failed;
4179 }
d38ceaf9
AD
4180 }
4181
9535a86a
SZ
4182 if (adev->bios) {
4183 if (adev->is_atom_fw) {
4184 /* Initialize clocks */
4185 r = amdgpu_atomfirmware_get_clock_info(adev);
4186 if (r) {
4187 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
4188 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4189 goto failed;
4190 }
4191 } else {
4192 /* Initialize clocks */
4193 r = amdgpu_atombios_get_clock_info(adev);
4194 if (r) {
4195 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
4196 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4197 goto failed;
4198 }
4199 /* init i2c buses */
4200 if (!amdgpu_device_has_dc_support(adev))
4201 amdgpu_atombios_i2c_init(adev);
a5bde2f9 4202 }
2c1a2784 4203 }
d38ceaf9 4204
bfca0289 4205fence_driver_init:
d38ceaf9 4206 /* Fence driver */
067f44c8 4207 r = amdgpu_fence_driver_sw_init(adev);
2c1a2784 4208 if (r) {
067f44c8 4209 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
e23b74aa 4210 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 4211 goto failed;
2c1a2784 4212 }
d38ceaf9
AD
4213
4214 /* init the mode config */
4a580877 4215 drm_mode_config_init(adev_to_drm(adev));
d38ceaf9 4216
06ec9070 4217 r = amdgpu_device_ip_init(adev);
d38ceaf9 4218 if (r) {
06ec9070 4219 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
e23b74aa 4220 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
970fd197 4221 goto release_ras_con;
d38ceaf9
AD
4222 }
4223
8d35a259
LG
4224 amdgpu_fence_driver_hw_init(adev);
4225
d69b8971
YZ
4226 dev_info(adev->dev,
4227 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
d7f72fe4
YZ
4228 adev->gfx.config.max_shader_engines,
4229 adev->gfx.config.max_sh_per_se,
4230 adev->gfx.config.max_cu_per_sh,
4231 adev->gfx.cu_info.number);
4232
d38ceaf9
AD
4233 adev->accel_working = true;
4234
e59c0205
AX
4235 amdgpu_vm_check_compute_bug(adev);
4236
95844d20
MO
4237 /* Initialize the buffer migration limit. */
4238 if (amdgpu_moverate >= 0)
4239 max_MBps = amdgpu_moverate;
4240 else
4241 max_MBps = 8; /* Allow 8 MB/s. */
4242 /* Get a log2 for easy divisions. */
4243 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
4244
b0adca4d
EQ
4245 /*
4246 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
4247 * Otherwise the mgpu fan boost feature will be skipped due to the
4248 * gpu instance is counted less.
4249 */
4250 amdgpu_register_gpu_instance(adev);
4251
d38ceaf9
AD
4252 /* enable clockgating, etc. after ib tests, etc. since some blocks require
4253 * explicit gating rather than handling it automatically.
4254 */
e3c1b071 4255 if (!adev->gmc.xgmi.pending_reset) {
4256 r = amdgpu_device_ip_late_init(adev);
4257 if (r) {
4258 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4259 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
970fd197 4260 goto release_ras_con;
e3c1b071 4261 }
4262 /* must succeed. */
4263 amdgpu_ras_resume(adev);
4264 queue_delayed_work(system_wq, &adev->delayed_init_work,
4265 msecs_to_jiffies(AMDGPU_RESUME_MS));
2c1a2784 4266 }
d38ceaf9 4267
38eecbe0
CL
4268 if (amdgpu_sriov_vf(adev)) {
4269 amdgpu_virt_release_full_gpu(adev, true);
2c738637 4270 flush_delayed_work(&adev->delayed_init_work);
38eecbe0 4271 }
2c738637 4272
90bcb9b5
EQ
4273 /*
4274	 * Place this sysfs registration after `late_init`, as some of the
4275	 * operations performed in `late_init` might affect the creation of
4276	 * those sysfs interfaces.
4277 */
4278 r = amdgpu_atombios_sysfs_init(adev);
4279 if (r)
4280 drm_err(&adev->ddev,
4281 "registering atombios sysfs failed (%d).\n", r);
4282
4283 r = amdgpu_pm_sysfs_init(adev);
4284 if (r)
4285 DRM_ERROR("registering pm sysfs failed (%d).\n", r);
4286
4287 r = amdgpu_ucode_sysfs_init(adev);
4288 if (r) {
4289 adev->ucode_sysfs_en = false;
4290 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
4291 } else
4292 adev->ucode_sysfs_en = true;
4293
77f3a5cd 4294 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
5aea5327 4295 if (r)
77f3a5cd 4296 dev_err(adev->dev, "Could not create amdgpu device attr\n");
bd607166 4297
76da73f0
LL
4298 r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
4299 if (r)
4300 dev_err(adev->dev,
4301 "Could not create amdgpu board attributes\n");
4302
7957ec80 4303 amdgpu_fru_sysfs_init(adev);
af39e6f4 4304 amdgpu_reg_state_sysfs_init(adev);
7957ec80 4305
d155bef0
AB
4306 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4307 r = amdgpu_pmu_init(adev);
9c7c85f7
JK
4308 if (r)
4309 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
4310
c1dd4aa6
AG
4311 /* Have stored pci confspace at hand for restore in sudden PCI error */
4312 if (amdgpu_device_cache_pci_state(adev->pdev))
4313 pci_restore_state(pdev);
4314
8c3dd61c
KHF
4315	/* if we have more than one VGA card, then disable the amdgpu VGA resources */
4316 /* this will fail for cards that aren't VGA class devices, just
b8920e1e
SS
4317 * ignore it
4318 */
8c3dd61c 4319 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
bf44e8ce 4320 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
8c3dd61c 4321
d37a3929
OC
4322 px = amdgpu_device_supports_px(ddev);
4323
7b1c6263 4324 if (px || (!dev_is_removable(&adev->pdev->dev) &&
d37a3929 4325 apple_gmux_detect(NULL, NULL)))
8c3dd61c
KHF
4326 vga_switcheroo_register_client(adev->pdev,
4327 &amdgpu_switcheroo_ops, px);
d37a3929
OC
4328
4329 if (px)
8c3dd61c 4330 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
8c3dd61c 4331
e3c1b071 4332 if (adev->gmc.xgmi.pending_reset)
4333 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
4334 msecs_to_jiffies(AMDGPU_RESUME_MS));
4335
4a74c38c
PY
4336 amdgpu_device_check_iommu_direct_map(adev);
4337
d38ceaf9 4338 return 0;
83ba126a 4339
970fd197 4340release_ras_con:
38eecbe0
CL
4341 if (amdgpu_sriov_vf(adev))
4342 amdgpu_virt_release_full_gpu(adev, true);
4343
4344 /* failed in exclusive mode due to timeout */
4345 if (amdgpu_sriov_vf(adev) &&
4346 !amdgpu_sriov_runtime(adev) &&
4347 amdgpu_virt_mmio_blocked(adev) &&
4348 !amdgpu_virt_wait_reset(adev)) {
4349 dev_err(adev->dev, "VF exclusive mode timeout\n");
4350 /* Don't send request since VF is inactive. */
4351 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4352 adev->virt.ops = NULL;
4353 r = -EAGAIN;
4354 }
970fd197
SY
4355 amdgpu_release_ras_context(adev);
4356
83ba126a 4357failed:
89041940 4358 amdgpu_vf_error_trans_all(adev);
8840a387 4359
83ba126a 4360 return r;
d38ceaf9
AD
4361}
4362
07775fc1
AG
4363static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4364{
62d5f9f7 4365
07775fc1
AG
4366 /* Clear all CPU mappings pointing to this device */
4367 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4368
4369 /* Unmap all mapped bars - Doorbell, registers and VRAM */
43c064db 4370 amdgpu_doorbell_fini(adev);
07775fc1
AG
4371
4372 iounmap(adev->rmmio);
4373 adev->rmmio = NULL;
4374 if (adev->mman.aper_base_kaddr)
4375 iounmap(adev->mman.aper_base_kaddr);
4376 adev->mman.aper_base_kaddr = NULL;
4377
4378 /* Memory manager related */
a0ba1279 4379 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
07775fc1
AG
4380 arch_phys_wc_del(adev->gmc.vram_mtrr);
4381 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4382 }
4383}
4384
d38ceaf9 4385/**
bbe04dec 4386 * amdgpu_device_fini_hw - tear down the driver
d38ceaf9
AD
4387 *
4388 * @adev: amdgpu_device pointer
4389 *
4390 * Tear down the driver info (all asics).
4391 * Called at driver shutdown.
4392 */
72c8c97b 4393void amdgpu_device_fini_hw(struct amdgpu_device *adev)
d38ceaf9 4394{
aac89168 4395 dev_info(adev->dev, "amdgpu: finishing device.\n");
9f875167 4396 flush_delayed_work(&adev->delayed_init_work);
d0d13fe8 4397 adev->shutdown = true;
9f875167 4398
752c683d
ML
4399	/* make sure IB tests have finished before entering exclusive mode
4400	 * to avoid preempting an IB test
b8920e1e 4401 */
519b8b76 4402 if (amdgpu_sriov_vf(adev)) {
752c683d 4403 amdgpu_virt_request_full_gpu(adev, false);
519b8b76
BZ
4404 amdgpu_virt_fini_data_exchange(adev);
4405 }
752c683d 4406
e5b03032
ML
4407 /* disable all interrupts */
4408 amdgpu_irq_disable_all(adev);
47fc644f 4409 if (adev->mode_info.mode_config_initialized) {
1053b9c9 4410 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4a580877 4411 drm_helper_force_disable_all(adev_to_drm(adev));
ff97cba8 4412 else
4a580877 4413 drm_atomic_helper_shutdown(adev_to_drm(adev));
ff97cba8 4414 }
8d35a259 4415 amdgpu_fence_driver_hw_fini(adev);
72c8c97b 4416
cd3a8a59 4417 if (adev->mman.initialized)
9bff18d1 4418 drain_workqueue(adev->mman.bdev.wq);
98f56188 4419
53e9d836 4420 if (adev->pm.sysfs_initialized)
7c868b59 4421 amdgpu_pm_sysfs_fini(adev);
72c8c97b
AG
4422 if (adev->ucode_sysfs_en)
4423 amdgpu_ucode_sysfs_fini(adev);
4424 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
7957ec80 4425 amdgpu_fru_sysfs_fini(adev);
72c8c97b 4426
af39e6f4
LL
4427 amdgpu_reg_state_sysfs_fini(adev);
4428
232d1d43
SY
4429 /* disable ras feature must before hw fini */
4430 amdgpu_ras_pre_fini(adev);
4431
b7043800
AD
4432 amdgpu_ttm_set_buffer_funcs_status(adev, false);
4433
e9669fb7 4434 amdgpu_device_ip_fini_early(adev);
d10d0daa 4435
a3848df6
YW
4436 amdgpu_irq_fini_hw(adev);
4437
b6fd6e0f
SK
4438 if (adev->mman.initialized)
4439 ttm_device_clear_dma_mappings(&adev->mman.bdev);
894c6890 4440
d10d0daa 4441 amdgpu_gart_dummy_page_fini(adev);
07775fc1 4442
39934d3e
VP
4443 if (drm_dev_is_unplugged(adev_to_drm(adev)))
4444 amdgpu_device_unmap_mmio(adev);
87172e89 4445
72c8c97b
AG
4446}
4447
4448void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4449{
62d5f9f7 4450 int idx;
d37a3929 4451 bool px;
62d5f9f7 4452
8d35a259 4453 amdgpu_fence_driver_sw_fini(adev);
a5c5d8d5 4454 amdgpu_device_ip_fini(adev);
b31d3063 4455 amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
d38ceaf9 4456 adev->accel_working = false;
68ce8b24 4457 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
04442bf7
LL
4458
4459 amdgpu_reset_fini(adev);
4460
f5e4cc84
YW
4461 amdgpu_aca_fini(adev);
4462
d38ceaf9 4463 /* free i2c buses */
4562236b
HW
4464 if (!amdgpu_device_has_dc_support(adev))
4465 amdgpu_i2c_fini(adev);
bfca0289
SL
4466
4467 if (amdgpu_emu_mode != 1)
4468 amdgpu_atombios_fini(adev);
4469
d38ceaf9
AD
4470 kfree(adev->bios);
4471 adev->bios = NULL;
d37a3929 4472
8a2b5139
LL
4473 kfree(adev->fru_info);
4474 adev->fru_info = NULL;
4475
d37a3929
OC
4476 px = amdgpu_device_supports_px(adev_to_drm(adev));
4477
7b1c6263 4478 if (px || (!dev_is_removable(&adev->pdev->dev) &&
d37a3929 4479 apple_gmux_detect(NULL, NULL)))
84c8b22e 4480 vga_switcheroo_unregister_client(adev->pdev);
d37a3929
OC
4481
4482 if (px)
83ba126a 4483 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d37a3929 4484
38d6be81 4485 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
b8779475 4486 vga_client_unregister(adev->pdev);
e9bc1bf7 4487
62d5f9f7
LS
4488 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4489
4490 iounmap(adev->rmmio);
4491 adev->rmmio = NULL;
43c064db 4492 amdgpu_doorbell_fini(adev);
62d5f9f7
LS
4493 drm_dev_exit(idx);
4494 }
4495
d155bef0
AB
4496 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4497 amdgpu_pmu_fini(adev);
72de33f8 4498 if (adev->mman.discovery_bin)
a190d1c7 4499 amdgpu_discovery_fini(adev);
72c8c97b 4500
cfbb6b00
AG
4501 amdgpu_reset_put_reset_domain(adev->reset_domain);
4502 adev->reset_domain = NULL;
4503
72c8c97b
AG
4504 kfree(adev->pci_state);
4505
d38ceaf9
AD
4506}
4507
58144d28
ND
4508/**
4509 * amdgpu_device_evict_resources - evict device resources
4510 * @adev: amdgpu device object
4511 *
4512 * Evicts all TTM device resources (VRAM BOs, GART table) from the LRU list
4513 * of the VRAM memory type. Mainly used for evicting device resources
4514 * at suspend time.
4515 *
4516 */
7863c155 4517static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
58144d28 4518{
7863c155
ML
4519 int ret;
4520
e53d9665
ML
4521 /* No need to evict vram on APUs for suspend to ram or s2idle */
4522 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
7863c155 4523 return 0;
58144d28 4524
7863c155
ML
4525 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4526 if (ret)
58144d28 4527 DRM_WARN("evicting device resources failed\n");
7863c155 4528 return ret;
58144d28 4529}
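
/* On APUs the "VRAM" is a carve-out of system memory, so its contents
 * survive suspend-to-RAM and s2idle, which is why the eviction above can
 * be skipped for them.
 */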
d38ceaf9
AD
4530
4531/*
4532 * Suspend & resume.
4533 */
5095d541
ML
4534/**
4535 * amdgpu_device_prepare - prepare for device suspend
4536 *
4537 * @dev: drm dev pointer
4538 *
4539 * Prepare to put the hw in the suspend state (all asics).
4540 * Returns 0 for success or an error on failure.
4541 * Called at driver suspend.
4542 */
4543int amdgpu_device_prepare(struct drm_device *dev)
4544{
4545 struct amdgpu_device *adev = drm_to_adev(dev);
cb11ca32 4546 int i, r;
5095d541
ML
4547
4548 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4549 return 0;
4550
4551 /* Evict the majority of BOs before starting suspend sequence */
4552 r = amdgpu_device_evict_resources(adev);
4553 if (r)
4554 return r;
4555
cb11ca32
ML
4556 for (i = 0; i < adev->num_ip_blocks; i++) {
4557 if (!adev->ip_blocks[i].status.valid)
4558 continue;
4559 if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
4560 continue;
4561 r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
4562 if (r)
4563 return r;
4564 }
4565
5095d541
ML
4566 return 0;
4567}
4568
d38ceaf9 4569/**
810ddc3a 4570 * amdgpu_device_suspend - initiate device suspend
d38ceaf9 4571 *
87e3f136 4572 * @dev: drm dev pointer
87e3f136 4573 * @fbcon: notify the fbdev of suspend
d38ceaf9
AD
4574 *
4575 * Puts the hw in the suspend state (all asics).
4576 * Returns 0 for success or an error on failure.
4577 * Called at driver suspend.
4578 */
de185019 4579int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
d38ceaf9 4580{
a2e15b0e 4581 struct amdgpu_device *adev = drm_to_adev(dev);
d7274ec7 4582 int r = 0;
d38ceaf9 4583
d38ceaf9
AD
4584 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4585 return 0;
4586
44779b43 4587 adev->in_suspend = true;
3fa8f89d 4588
d7274ec7
BZ
4589 if (amdgpu_sriov_vf(adev)) {
4590 amdgpu_virt_fini_data_exchange(adev);
4591 r = amdgpu_virt_request_full_gpu(adev, false);
4592 if (r)
4593 return r;
4594 }
4595
3fa8f89d
S
4596 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4597 DRM_WARN("smart shift update failed\n");
4598
5f818173 4599 if (fbcon)
087451f3 4600 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
5f818173 4601
beff74bc 4602 cancel_delayed_work_sync(&adev->delayed_init_work);
0dee7263 4603 flush_delayed_work(&adev->gfx.gfx_off_delay_work);
a5459475 4604
5e6932fe 4605 amdgpu_ras_suspend(adev);
4606
2196927b 4607 amdgpu_device_ip_suspend_phase1(adev);
fe1053b7 4608
c004d44e 4609 if (!adev->in_s0ix)
5d3a2d95 4610 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
94fa5660 4611
7863c155
ML
4612 r = amdgpu_device_evict_resources(adev);
4613 if (r)
4614 return r;
d38ceaf9 4615
dab96d8b
AD
4616 amdgpu_ttm_set_buffer_funcs_status(adev, false);
4617
8d35a259 4618 amdgpu_fence_driver_hw_fini(adev);
d38ceaf9 4619
2196927b 4620 amdgpu_device_ip_suspend_phase2(adev);
d38ceaf9 4621
d7274ec7
BZ
4622 if (amdgpu_sriov_vf(adev))
4623 amdgpu_virt_release_full_gpu(adev, false);
4624
2e9b1523
PY
4625 r = amdgpu_dpm_notify_rlc_state(adev, false);
4626 if (r)
4627 return r;
4628
d38ceaf9
AD
4629 return 0;
4630}
4631
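/* Rough ordering rationale for the sequence above: displays go down first
 * (phase1), KFD is quiesced, VRAM is evicted while the copy paths still
 * work, then the fence HW and the remaining IPs (phase2) are torn down.
 * Eviction has to precede phase2 since it relies on rings phase2 disables.
 */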
4632/**
810ddc3a 4633 * amdgpu_device_resume - initiate device resume
d38ceaf9 4634 *
87e3f136 4635 * @dev: drm dev pointer
87e3f136 4636 * @fbcon: notify the fbdev of resume
d38ceaf9
AD
4637 *
4638 * Bring the hw back to operating state (all asics).
4639 * Returns 0 for success or an error on failure.
4640 * Called at driver resume.
4641 */
de185019 4642int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
d38ceaf9 4643{
1348969a 4644 struct amdgpu_device *adev = drm_to_adev(dev);
03161a6e 4645 int r = 0;
d38ceaf9 4646
d7274ec7
BZ
4647 if (amdgpu_sriov_vf(adev)) {
4648 r = amdgpu_virt_request_full_gpu(adev, true);
4649 if (r)
4650 return r;
4651 }
4652
d38ceaf9
AD
4653 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4654 return 0;
4655
62498733 4656 if (adev->in_s0ix)
bc143d8b 4657 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
628c36d7 4658
d38ceaf9 4659 /* post card */
39c640c0 4660 if (amdgpu_device_need_post(adev)) {
4d2997ab 4661 r = amdgpu_device_asic_init(adev);
74b0b157 4662 if (r)
aac89168 4663 dev_err(adev->dev, "amdgpu asic init failed\n");
74b0b157 4664 }
d38ceaf9 4665
06ec9070 4666 r = amdgpu_device_ip_resume(adev);
d7274ec7 4667
e6707218 4668 if (r) {
aac89168 4669 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3c22c1ea 4670 goto exit;
e6707218 4671 }
8d35a259 4672 amdgpu_fence_driver_hw_init(adev);
5ceb54c6 4673
c004d44e 4674 if (!adev->in_s0ix) {
5d3a2d95
AD
4675 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4676 if (r)
3c22c1ea 4677 goto exit;
5d3a2d95 4678 }
756e6880 4679
8ed79c40
TH
4680 r = amdgpu_device_ip_late_init(adev);
4681 if (r)
4682 goto exit;
4683
4684 queue_delayed_work(system_wq, &adev->delayed_init_work,
4685 msecs_to_jiffies(AMDGPU_RESUME_MS));
3c22c1ea
SF
4686exit:
4687 if (amdgpu_sriov_vf(adev)) {
4688 amdgpu_virt_init_data_exchange(adev);
4689 amdgpu_virt_release_full_gpu(adev, true);
4690 }
4691
4692 if (r)
4693 return r;
4694
96a5d8d4 4695 /* Make sure IB tests flushed */
beff74bc 4696 flush_delayed_work(&adev->delayed_init_work);
96a5d8d4 4697
a2e15b0e 4698 if (fbcon)
087451f3 4699 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
d38ceaf9 4700
5e6932fe 4701 amdgpu_ras_resume(adev);
4702
d09ef243
AD
4703 if (adev->mode_info.num_crtc) {
4704 /*
4705 * Most of the connector probing functions try to acquire runtime pm
4706 * refs to ensure that the GPU is powered on when connector polling is
4707 * performed. Since we're calling this from a runtime PM callback,
4708 * trying to acquire rpm refs will cause us to deadlock.
4709 *
4710 * Since we're guaranteed to be holding the rpm lock, it's safe to
4711 * temporarily disable the rpm helpers so this doesn't deadlock us.
4712 */
23a1a9e5 4713#ifdef CONFIG_PM
d09ef243 4714 dev->dev->power.disable_depth++;
23a1a9e5 4715#endif
d09ef243
AD
4716 if (!adev->dc_enabled)
4717 drm_helper_hpd_irq_event(dev);
4718 else
4719 drm_kms_helper_hotplug_event(dev);
23a1a9e5 4720#ifdef CONFIG_PM
d09ef243 4721 dev->dev->power.disable_depth--;
23a1a9e5 4722#endif
d09ef243 4723 }
44779b43
RZ
4724 adev->in_suspend = false;
4725
dc907c9d
JX
4726 if (adev->enable_mes)
4727 amdgpu_mes_self_test(adev);
4728
3fa8f89d
S
4729 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4730 DRM_WARN("smart shift update failed\n");
4731
4d3b9ae5 4732 return 0;
d38ceaf9
AD
4733}
4734
e3ecdffa
AD
4735/**
4736 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4737 *
4738 * @adev: amdgpu_device pointer
4739 *
4740 * The list of all the hardware IPs that make up the asic is walked and
4741 * the check_soft_reset callbacks are run. check_soft_reset determines
4742 * if the asic is still hung or not.
4743 * Returns true if any of the IPs are still in a hung state, false if not.
4744 */
06ec9070 4745static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
63fbf42f
CZ
4746{
4747 int i;
4748 bool asic_hang = false;
4749
f993d628
ML
4750 if (amdgpu_sriov_vf(adev))
4751 return true;
4752
8bc04c29
AD
4753 if (amdgpu_asic_need_full_reset(adev))
4754 return true;
4755
63fbf42f 4756 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 4757 if (!adev->ip_blocks[i].status.valid)
63fbf42f 4758 continue;
a1255107
AD
4759 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4760 adev->ip_blocks[i].status.hang =
4761 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4762 if (adev->ip_blocks[i].status.hang) {
aac89168 4763 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
4764 asic_hang = true;
4765 }
4766 }
4767 return asic_hang;
4768}
4769
e3ecdffa
AD
4770/**
4771 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4772 *
4773 * @adev: amdgpu_device pointer
4774 *
4775 * The list of all the hardware IPs that make up the asic is walked and the
4776 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4777 * handles any IP specific hardware or software state changes that are
4778 * necessary for a soft reset to succeed.
4779 * Returns 0 on success, negative error code on failure.
4780 */
06ec9070 4781static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
4782{
4783 int i, r = 0;
4784
4785 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 4786 if (!adev->ip_blocks[i].status.valid)
d31a501e 4787 continue;
a1255107
AD
4788 if (adev->ip_blocks[i].status.hang &&
4789 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4790 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
4791 if (r)
4792 return r;
4793 }
4794 }
4795
4796 return 0;
4797}
4798
e3ecdffa
AD
4799/**
4800 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4801 *
4802 * @adev: amdgpu_device pointer
4803 *
4804 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4805 * reset is necessary to recover.
4806 * Returns true if a full asic reset is required, false if not.
4807 */
06ec9070 4808static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
35d782fe 4809{
da146d3b
AD
4810 int i;
4811
8bc04c29
AD
4812 if (amdgpu_asic_need_full_reset(adev))
4813 return true;
4814
da146d3b 4815 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 4816 if (!adev->ip_blocks[i].status.valid)
da146d3b 4817 continue;
a1255107
AD
4818 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4819 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4820 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
4821 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4822 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 4823 if (adev->ip_blocks[i].status.hang) {
aac89168 4824				dev_info(adev->dev, "Some blocks need full reset!\n");
da146d3b
AD
4825 return true;
4826 }
4827 }
35d782fe
CZ
4828 }
4829 return false;
4830}
4831
e3ecdffa
AD
4832/**
4833 * amdgpu_device_ip_soft_reset - do a soft reset
4834 *
4835 * @adev: amdgpu_device pointer
4836 *
4837 * The list of all the hardware IPs that make up the asic is walked and the
4838 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4839 * IP specific hardware or software state changes that are necessary to soft
4840 * reset the IP.
4841 * Returns 0 on success, negative error code on failure.
4842 */
06ec9070 4843static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
4844{
4845 int i, r = 0;
4846
4847 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 4848 if (!adev->ip_blocks[i].status.valid)
35d782fe 4849 continue;
a1255107
AD
4850 if (adev->ip_blocks[i].status.hang &&
4851 adev->ip_blocks[i].version->funcs->soft_reset) {
4852 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
4853 if (r)
4854 return r;
4855 }
4856 }
4857
4858 return 0;
4859}
4860
e3ecdffa
AD
4861/**
4862 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4863 *
4864 * @adev: amdgpu_device pointer
4865 *
4866 * The list of all the hardware IPs that make up the asic is walked and the
4867 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4868 * handles any IP specific hardware or software state changes that are
4869 * necessary after the IP has been soft reset.
4870 * Returns 0 on success, negative error code on failure.
4871 */
06ec9070 4872static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
4873{
4874 int i, r = 0;
4875
4876 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 4877 if (!adev->ip_blocks[i].status.valid)
35d782fe 4878 continue;
a1255107
AD
4879 if (adev->ip_blocks[i].status.hang &&
4880 adev->ip_blocks[i].version->funcs->post_soft_reset)
4881 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
4882 if (r)
4883 return r;
4884 }
4885
4886 return 0;
4887}
4888
e3ecdffa 4889/**
c33adbc7 4890 * amdgpu_device_recover_vram - Recover some VRAM contents
e3ecdffa
AD
4891 *
4892 * @adev: amdgpu_device pointer
4893 *
4894 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4895 * restore things like GPUVM page tables after a GPU reset where
4896 * the contents of VRAM might be lost.
403009bf
CK
4897 *
4898 * Returns:
4899 * 0 on success, negative error code on failure.
e3ecdffa 4900 */
c33adbc7 4901static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
c41d1cf6 4902{
c41d1cf6 4903 struct dma_fence *fence = NULL, *next = NULL;
403009bf 4904 struct amdgpu_bo *shadow;
e18aaea7 4905 struct amdgpu_bo_vm *vmbo;
403009bf 4906 long r = 1, tmo;
c41d1cf6
ML
4907
4908 if (amdgpu_sriov_runtime(adev))
b045d3af 4909 tmo = msecs_to_jiffies(8000);
c41d1cf6
ML
4910 else
4911 tmo = msecs_to_jiffies(100);
4912
aac89168 4913 dev_info(adev->dev, "recover vram bo from shadow start\n");
c41d1cf6 4914 mutex_lock(&adev->shadow_list_lock);
e18aaea7 4915 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4994d1f0
LC
4916 /* If vm is compute context or adev is APU, shadow will be NULL */
4917 if (!vmbo->shadow)
4918 continue;
4919 shadow = vmbo->shadow;
4920
403009bf 4921 /* No need to recover an evicted BO */
d3116756
CK
4922 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4923 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4924 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
403009bf
CK
4925 continue;
4926
4927 r = amdgpu_bo_restore_shadow(shadow, &next);
4928 if (r)
4929 break;
4930
c41d1cf6 4931 if (fence) {
1712fb1a 4932 tmo = dma_fence_wait_timeout(fence, false, tmo);
403009bf
CK
4933 dma_fence_put(fence);
4934 fence = next;
1712fb1a 4935 if (tmo == 0) {
4936 r = -ETIMEDOUT;
c41d1cf6 4937 break;
1712fb1a 4938 } else if (tmo < 0) {
4939 r = tmo;
4940 break;
4941 }
403009bf
CK
4942 } else {
4943 fence = next;
c41d1cf6 4944 }
c41d1cf6
ML
4945 }
4946 mutex_unlock(&adev->shadow_list_lock);
4947
403009bf
CK
4948 if (fence)
4949 tmo = dma_fence_wait_timeout(fence, false, tmo);
c41d1cf6
ML
4950 dma_fence_put(fence);
4951
1712fb1a 4952 if (r < 0 || tmo <= 0) {
aac89168 4953 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
403009bf
CK
4954 return -EIO;
4955 }
c41d1cf6 4956
aac89168 4957 dev_info(adev->dev, "recover vram bo from shadow done\n");
403009bf 4958 return 0;
c41d1cf6
ML
4959}
4960
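/* Shadow copies live in GTT precisely so that page-table contents can be
 * copied back after a reset wipes VRAM; the longer 8 s timeout under
 * SR-IOV runtime presumably allows for slower, world-switched transfers.
 */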
a90ad3c2 4961
e3ecdffa 4962/**
06ec9070 4963 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5740682e 4964 *
982a820b 4965 * @adev: amdgpu_device pointer
87e3f136 4966 * @from_hypervisor: request from hypervisor
5740682e
ML
4967 *
4968 * do a VF FLR and reinitialize the ASIC
3f48c681 4969 * Returns 0 on success, otherwise a negative error code.
e3ecdffa
AD
4970 */
4971static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4972 bool from_hypervisor)
5740682e
ML
4973{
4974 int r;
a5f67c93 4975 struct amdgpu_hive_info *hive = NULL;
7258fa31 4976 int retry_limit = 0;
5740682e 4977
7258fa31 4978retry:
c004d44e 4979 amdgpu_amdkfd_pre_reset(adev);
428890a3 4980
5740682e
ML
4981 if (from_hypervisor)
4982 r = amdgpu_virt_request_full_gpu(adev, true);
4983 else
4984 r = amdgpu_virt_reset_gpu(adev);
4985 if (r)
4986 return r;
f734b213 4987 amdgpu_irq_gpu_reset_resume_helper(adev);
a90ad3c2 4988
83f24a8f
HC
4989 /* some SW cleanup the VF needs to do before recovery */
4990 amdgpu_virt_post_reset(adev);
4991
a90ad3c2 4992 /* Resume IP prior to SMC */
06ec9070 4993 r = amdgpu_device_ip_reinit_early_sriov(adev);
5740682e
ML
4994 if (r)
4995 goto error;
a90ad3c2 4996
c9ffa427 4997 amdgpu_virt_init_data_exchange(adev);
a90ad3c2 4998
7a3e0bb2
RZ
4999 r = amdgpu_device_fw_loading(adev);
5000 if (r)
5001 return r;
5002
a90ad3c2 5003 /* now we are okay to resume SMC/CP/SDMA */
06ec9070 5004 r = amdgpu_device_ip_reinit_late_sriov(adev);
5740682e
ML
5005 if (r)
5006 goto error;
a90ad3c2 5007
a5f67c93
ZL
5008 hive = amdgpu_get_xgmi_hive(adev);
5009 /* Update PSP FW topology after reset */
5010 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
5011 r = amdgpu_xgmi_update_topology(hive, adev);
5012
5013 if (hive)
5014 amdgpu_put_xgmi_hive(hive);
5015
5016 if (!r) {
a5f67c93 5017 r = amdgpu_ib_ring_tests(adev);
9c12f5cd 5018
c004d44e 5019 amdgpu_amdkfd_post_reset(adev);
a5f67c93 5020 }
a90ad3c2 5021
abc34253 5022error:
c41d1cf6 5023 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
e3526257 5024 amdgpu_inc_vram_lost(adev);
c33adbc7 5025 r = amdgpu_device_recover_vram(adev);
a90ad3c2 5026 }
437f3e0b 5027 amdgpu_virt_release_full_gpu(adev, true);
a90ad3c2 5028
7258fa31
SK
5029 if (AMDGPU_RETRY_SRIOV_RESET(r)) {
5030 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
5031 retry_limit++;
5032 goto retry;
5033 } else
5034 DRM_ERROR("GPU reset retry is beyond the retry limit\n");
5035 }
5036
a90ad3c2
ML
5037 return r;
5038}
5039
9a1cddd6 5040/**
5041 * amdgpu_device_has_job_running - check if there is any job in the pending list
5042 *
982a820b 5043 * @adev: amdgpu_device pointer
9a1cddd6 5044 *
5045 * check if there is any job in the pending list
5046 */
5047bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
5048{
5049 int i;
5050 struct drm_sched_job *job;
5051
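/*
 * Peek at each ring's pending list under the scheduler lock. This is
 * only a point-in-time snapshot; a job may retire right after the
 * unlock, so the result is advisory.
 */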
5052 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5053 struct amdgpu_ring *ring = adev->rings[i];
5054
35963cf2 5055 if (!ring || !drm_sched_wqueue_ready(&ring->sched))
9a1cddd6 5056 continue;
5057
5058 spin_lock(&ring->sched.job_list_lock);
6efa4b46
LT
5059 job = list_first_entry_or_null(&ring->sched.pending_list,
5060 struct drm_sched_job, list);
9a1cddd6 5061 spin_unlock(&ring->sched.job_list_lock);
5062 if (job)
5063 return true;
5064 }
5065 return false;
5066}
5067
12938fad
CK
5068/**
5069 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
5070 *
982a820b 5071 * @adev: amdgpu_device pointer
12938fad
CK
5072 *
5073 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
5074 * a hung GPU.
5075 */
5076bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
5077{
12938fad 5078
3ba7b418
AG
5079 if (amdgpu_gpu_recovery == 0)
5080 goto disabled;
5081
1a11a65d
YC
5082 /* Skip soft reset check in fatal error mode */
5083 if (!amdgpu_ras_is_poison_mode_supported(adev))
5084 return true;
5085
3ba7b418
AG
5086 if (amdgpu_sriov_vf(adev))
5087 return true;
5088
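/*
 * amdgpu_gpu_recovery: 0 = disabled, 1 = enabled, -1 = auto (default).
 * In auto mode, recovery stays enabled except on the ASICs listed below.
 */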
5089 if (amdgpu_gpu_recovery == -1) {
5090 switch (adev->asic_type) {
b3523c45
AD
5091#ifdef CONFIG_DRM_AMDGPU_SI
5092 case CHIP_VERDE:
5093 case CHIP_TAHITI:
5094 case CHIP_PITCAIRN:
5095 case CHIP_OLAND:
5096 case CHIP_HAINAN:
5097#endif
5098#ifdef CONFIG_DRM_AMDGPU_CIK
5099 case CHIP_KAVERI:
5100 case CHIP_KABINI:
5101 case CHIP_MULLINS:
5102#endif
5103 case CHIP_CARRIZO:
5104 case CHIP_STONEY:
5105 case CHIP_CYAN_SKILLFISH:
3ba7b418 5106 goto disabled;
b3523c45
AD
5107 default:
5108 break;
3ba7b418 5109 }
12938fad
CK
5110 }
5111
5112 return true;
3ba7b418
AG
5113
5114disabled:
aac89168 5115 dev_info(adev->dev, "GPU recovery disabled.\n");
3ba7b418 5116 return false;
12938fad
CK
5117}
5118
5c03e584
FX
5119int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
5120{
47fc644f
SS
5121 u32 i;
5122 int ret = 0;
5c03e584 5123
47fc644f 5124 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
5c03e584 5125
47fc644f 5126 dev_info(adev->dev, "GPU mode1 reset\n");
5c03e584 5127
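/*
 * Mode1 reset resets the whole ASIC, so bus mastering is disabled and
 * the PCI config space is cached here, then restored once the reset
 * (via the SMU if supported, otherwise via the PSP) completes.
 */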
47fc644f
SS
5128 /* disable BM */
5129 pci_clear_master(adev->pdev);
5c03e584 5130
47fc644f 5131 amdgpu_device_cache_pci_state(adev->pdev);
5c03e584 5132
47fc644f
SS
5133 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
5134 dev_info(adev->dev, "GPU smu mode1 reset\n");
5135 ret = amdgpu_dpm_mode1_reset(adev);
5136 } else {
5137 dev_info(adev->dev, "GPU psp mode1 reset\n");
5138 ret = psp_gpu_reset(adev);
5139 }
5c03e584 5140
47fc644f 5141 if (ret)
7d442437 5142 goto mode1_reset_failed;
5c03e584 5143
47fc644f 5144 amdgpu_device_load_pci_state(adev->pdev);
7656168a
LL
5145 ret = amdgpu_psp_wait_for_bootloader(adev);
5146 if (ret)
7d442437 5147 goto mode1_reset_failed;
5c03e584 5148
47fc644f
SS
5149 /* wait for asic to come out of reset */
5150 for (i = 0; i < adev->usec_timeout; i++) {
5151 u32 memsize = adev->nbio.funcs->get_memsize(adev);
5c03e584 5152
47fc644f
SS
5153 if (memsize != 0xffffffff)
5154 break;
5155 udelay(1);
5156 }
5c03e584 5157
7d442437
HZ
5158 if (i >= adev->usec_timeout) {
5159 ret = -ETIMEDOUT;
5160 goto mode1_reset_failed;
5161 }
5162
47fc644f 5163 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
7656168a 5164
7d442437
HZ
5165 return 0;
5166
5167mode1_reset_failed:
5168 dev_err(adev->dev, "GPU mode1 reset failed\n");
47fc644f 5169 return ret;
5c03e584 5170}
5c6dd71e 5171
e3c1b071 5172int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
04442bf7 5173 struct amdgpu_reset_context *reset_context)
26bc5340 5174{
5c1e6fa4 5175 int i, r = 0;
04442bf7
LL
5176 struct amdgpu_job *job = NULL;
5177 bool need_full_reset =
5178 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5179
5180 if (reset_context->reset_req_dev == adev)
5181 job = reset_context->job;
71182665 5182
b602ca5f
TZ
5183 if (amdgpu_sriov_vf(adev)) {
5184 /* stop the data exchange thread */
5185 amdgpu_virt_fini_data_exchange(adev);
5186 }
5187
9e225fb9
AG
5188 amdgpu_fence_driver_isr_toggle(adev, true);
5189
71182665 5190 /* block all schedulers and reset given job's ring */
0875dc9e
CZ
5191 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5192 struct amdgpu_ring *ring = adev->rings[i];
5193
35963cf2 5194 if (!ring || !drm_sched_wqueue_ready(&ring->sched))
0875dc9e 5195 continue;
5740682e 5196
b8920e1e
SS
5197 /* Clear the job fences from the fence driver to avoid force_completion
5198 * leaving NULL and the vm flush fence in the fence driver
5199 */
5c1e6fa4 5200 amdgpu_fence_driver_clear_job_fences(ring);
c530b02f 5201
2f9d4084
ML
5202 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
5203 amdgpu_fence_driver_force_completion(ring);
0875dc9e 5204 }
d38ceaf9 5205
9e225fb9
AG
5206 amdgpu_fence_driver_isr_toggle(adev, false);
5207
ff99849b 5208 if (job && job->vm)
222b5f04
AG
5209 drm_sched_increase_karma(&job->base);
5210
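/*
 * Give any ASIC-specific reset handler the first chance to prepare;
 * -EOPNOTSUPP means no handler is registered and we fall through to
 * the generic soft/full reset path below.
 */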
04442bf7 5211 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
404b277b 5212 /* If reset handler not implemented, continue; otherwise return */
b8920e1e 5213 if (r == -EOPNOTSUPP)
404b277b
LL
5214 r = 0;
5215 else
04442bf7
LL
5216 return r;
5217
1d721ed6 5218 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
26bc5340
AG
5219 if (!amdgpu_sriov_vf(adev)) {
5220
5221 if (!need_full_reset)
5222 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
5223
360cd081
LG
5224 if (!need_full_reset && amdgpu_gpu_recovery &&
5225 amdgpu_device_ip_check_soft_reset(adev)) {
26bc5340
AG
5226 amdgpu_device_ip_pre_soft_reset(adev);
5227 r = amdgpu_device_ip_soft_reset(adev);
5228 amdgpu_device_ip_post_soft_reset(adev);
5229 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
aac89168 5230 dev_info(adev->dev, "soft reset failed, will fall back to full reset!\n");
26bc5340
AG
5231 need_full_reset = true;
5232 }
5233 }
5234
5235 if (need_full_reset)
5236 r = amdgpu_device_ip_suspend(adev);
04442bf7
LL
5237 if (need_full_reset)
5238 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5239 else
5240 clear_bit(AMDGPU_NEED_FULL_RESET,
5241 &reset_context->flags);
26bc5340
AG
5242 }
5243
5244 return r;
5245}
5246
15fd09a0
SA
5247static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
5248{
15fd09a0
SA
5249 int i;
5250
38a15ad9 5251 lockdep_assert_held(&adev->reset_domain->sem);
15fd09a0 5252
2d6a2a28
AA
5253 for (i = 0; i < adev->reset_info.num_regs; i++) {
5254 adev->reset_info.reset_dump_reg_value[i] =
5255 RREG32(adev->reset_info.reset_dump_reg_list[i]);
3d8785f6 5256
2d6a2a28
AA
5257 trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i],
5258 adev->reset_info.reset_dump_reg_value[i]);
3d8785f6
SA
5259 }
5260
15fd09a0 5261 return 0;
3d8785f6 5262}
3d8785f6 5263
04442bf7
LL
5264int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5265 struct amdgpu_reset_context *reset_context)
26bc5340
AG
5266{
5267 struct amdgpu_device *tmp_adev = NULL;
04442bf7 5268 bool need_full_reset, skip_hw_reset, vram_lost = false;
26bc5340
AG
5269 int r = 0;
5270
04442bf7
LL
5271 /* Try reset handler method first */
5272 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5273 reset_list);
15fd09a0 5274 amdgpu_reset_reg_dumps(tmp_adev);
0a83bb35
LL
5275
5276 reset_context->reset_device_list = device_list_handle;
04442bf7 5277 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
404b277b 5278 /* If reset handler not implemented, continue; otherwise return */
b8920e1e 5279 if (r == -EOPNOTSUPP)
404b277b
LL
5280 r = 0;
5281 else
04442bf7
LL
5282 return r;
5283
5284 /* Reset handler not implemented, use the default method */
5285 need_full_reset =
5286 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5287 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
5288
26bc5340 5289 /*
655ce9cb 5290 * ASIC reset has to be done on all XGMI hive nodes ASAP
26bc5340
AG
5291 * to allow proper link negotiation in FW (within 1 sec)
5292 */
7ac71382 5293 if (!skip_hw_reset && need_full_reset) {
655ce9cb 5294 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
041a62bc 5295 /* For XGMI run all resets in parallel to speed up the process */
d4535e2c 5296 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
e3c1b071 5297 tmp_adev->gmc.xgmi.pending_reset = false;
c96cf282 5298 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
d4535e2c
AG
5299 r = -EALREADY;
5300 } else
5301 r = amdgpu_asic_reset(tmp_adev);
d4535e2c 5302
041a62bc 5303 if (r) {
aac89168 5304 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4a580877 5305 r, adev_to_drm(tmp_adev)->unique);
19349072 5306 goto out;
ce316fa5
LM
5307 }
5308 }
5309
041a62bc
AG
5310 /* For XGMI wait for all resets to complete before proceed */
5311 if (!r) {
655ce9cb 5312 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
ce316fa5
LM
5313 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5314 flush_work(&tmp_adev->xgmi_reset_work);
5315 r = tmp_adev->asic_reset_res;
5316 if (r)
5317 break;
ce316fa5
LM
5318 }
5319 }
5320 }
ce316fa5 5321 }
26bc5340 5322
43c4d576 5323 if (!r && amdgpu_ras_intr_triggered()) {
655ce9cb 5324 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
21226f02 5325 amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB);
43c4d576
JC
5326 }
5327
00eaa571 5328 amdgpu_ras_intr_cleared();
43c4d576 5329 }
00eaa571 5330
655ce9cb 5331 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
26bc5340
AG
5332 if (need_full_reset) {
5333 /* post card */
e3c1b071 5334 r = amdgpu_device_asic_init(tmp_adev);
5335 if (r) {
aac89168 5336 dev_warn(tmp_adev->dev, "asic atom init failed!");
e3c1b071 5337 } else {
26bc5340 5338 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
9cec53c1 5339
26bc5340
AG
5340 r = amdgpu_device_ip_resume_phase1(tmp_adev);
5341 if (r)
5342 goto out;
5343
5344 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
a7691785
AA
5345
5346 amdgpu_coredump(tmp_adev, vram_lost, reset_context);
5347
26bc5340 5348 if (vram_lost) {
77e7f829 5349 DRM_INFO("VRAM is lost due to GPU reset!\n");
e3526257 5350 amdgpu_inc_vram_lost(tmp_adev);
26bc5340
AG
5351 }
5352
26bc5340
AG
5353 r = amdgpu_device_fw_loading(tmp_adev);
5354 if (r)
5355 return r;
5356
c45e38f2
LL
5357 r = amdgpu_xcp_restore_partition_mode(
5358 tmp_adev->xcp_mgr);
5359 if (r)
5360 goto out;
5361
26bc5340
AG
5362 r = amdgpu_device_ip_resume_phase2(tmp_adev);
5363 if (r)
5364 goto out;
5365
b7043800
AD
5366 if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
5367 amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
5368
26bc5340
AG
5369 if (vram_lost)
5370 amdgpu_device_fill_reset_magic(tmp_adev);
5371
fdafb359
EQ
5372 /*
5373 * Add this ASIC back as tracked since the reset
5374 * completed successfully.
5375 */
5376 amdgpu_register_gpu_instance(tmp_adev);
5377
04442bf7
LL
5378 if (!reset_context->hive &&
5379 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
e3c1b071 5380 amdgpu_xgmi_add_device(tmp_adev);
5381
7c04ca50 5382 r = amdgpu_device_ip_late_init(tmp_adev);
5383 if (r)
5384 goto out;
5385
087451f3 5386 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
565d1941 5387
e8fbaf03
GC
5388 /*
5389 * The GPU enters a bad state once the number of faulty
5390 * pages retired by ECC reaches the threshold, and RAS
5391 * recovery is scheduled next. So add one check
5392 * here to break recovery if it indeed exceeds the
5393 * bad page threshold, and remind the user to
5394 * retire this GPU or set a bigger
5395 * bad_page_threshold value to fix this when
5396 * probing the driver again.
5397 */
11003c68 5398 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
e8fbaf03
GC
5399 /* must succeed. */
5400 amdgpu_ras_resume(tmp_adev);
5401 } else {
5402 r = -EINVAL;
5403 goto out;
5404 }
e79a04d5 5405
26bc5340 5406 /* Update PSP FW topology after reset */
04442bf7
LL
5407 if (reset_context->hive &&
5408 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5409 r = amdgpu_xgmi_update_topology(
5410 reset_context->hive, tmp_adev);
26bc5340
AG
5411 }
5412 }
5413
26bc5340
AG
5414out:
5415 if (!r) {
5416 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5417 r = amdgpu_ib_ring_tests(tmp_adev);
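/*
 * An IB test failure after re-init means the ASIC did not really
 * recover; escalate to a full reset and return -EAGAIN so the
 * caller can retry the whole procedure.
 */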
5418 if (r) {
5419 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
26bc5340
AG
5420 need_full_reset = true;
5421 r = -EAGAIN;
5422 goto end;
5423 }
5424 }
5425
5426 if (!r)
5427 r = amdgpu_device_recover_vram(tmp_adev);
5428 else
5429 tmp_adev->asic_reset_res = r;
5430 }
5431
5432end:
04442bf7
LL
5433 if (need_full_reset)
5434 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5435 else
5436 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
26bc5340
AG
5437 return r;
5438}
5439
e923be99 5440static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
26bc5340 5441{
5740682e 5442
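/*
 * Record which MP1 (SMU) state matches the coming reset: mode1 shuts
 * MP1 down entirely, mode2 only puts it into reset, and everything
 * else maps to PP_MP1_STATE_NONE.
 */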
a3a09142
AD
5443 switch (amdgpu_asic_reset_method(adev)) {
5444 case AMD_RESET_METHOD_MODE1:
5445 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5446 break;
5447 case AMD_RESET_METHOD_MODE2:
5448 adev->mp1_state = PP_MP1_STATE_RESET;
5449 break;
5450 default:
5451 adev->mp1_state = PP_MP1_STATE_NONE;
5452 break;
5453 }
26bc5340 5454}
d38ceaf9 5455
e923be99 5456static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
26bc5340 5457{
89041940 5458 amdgpu_vf_error_trans_all(adev);
a3a09142 5459 adev->mp1_state = PP_MP1_STATE_NONE;
91fb309d
HC
5460}
5461
3f12acc8
EQ
5462static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5463{
5464 struct pci_dev *p = NULL;
5465
5466 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5467 adev->pdev->bus->number, 1);
5468 if (p) {
5469 pm_runtime_enable(&(p->dev));
5470 pm_runtime_resume(&(p->dev));
5471 }
b85e285e
YY
5472
5473 pci_dev_put(p);
3f12acc8
EQ
5474}
5475
5476static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5477{
5478 enum amd_reset_method reset_method;
5479 struct pci_dev *p = NULL;
5480 u64 expires;
5481
5482 /*
5483 * For now, only BACO and mode1 reset are confirmed
5484 * to suffer the audio issue if not properly suspended.
5485 */
5486 reset_method = amdgpu_asic_reset_method(adev);
5487 if ((reset_method != AMD_RESET_METHOD_BACO) &&
5488 (reset_method != AMD_RESET_METHOD_MODE1))
5489 return -EINVAL;
5490
5491 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5492 adev->pdev->bus->number, 1);
5493 if (!p)
5494 return -ENODEV;
5495
5496 expires = pm_runtime_autosuspend_expiration(&(p->dev));
5497 if (!expires)
5498 /*
5499 * If we cannot get the audio device autosuspend delay,
5500 * a fixed 4s interval will be used. Since 3s is the
5501 * audio controller's default autosuspend delay setting,
5502 * the 4s used here is guaranteed to cover it.
5503 */
54b7feb9 5504 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
3f12acc8
EQ
5505
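/*
 * Poll until the audio function reports suspended;
 * pm_runtime_suspend() returning 0 means the request was accepted.
 * Give up once the deadline computed above passes.
 */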
5506 while (!pm_runtime_status_suspended(&(p->dev))) {
5507 if (!pm_runtime_suspend(&(p->dev)))
5508 break;
5509
5510 if (expires < ktime_get_mono_fast_ns()) {
5511 dev_warn(adev->dev, "failed to suspend display audio\n");
b85e285e 5512 pci_dev_put(p);
3f12acc8
EQ
5513 /* TODO: abort the succeeding gpu reset? */
5514 return -ETIMEDOUT;
5515 }
5516 }
5517
5518 pm_runtime_disable(&(p->dev));
5519
b85e285e 5520 pci_dev_put(p);
3f12acc8
EQ
5521 return 0;
5522}
5523
d193b12b 5524static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
247c7b0d
AG
5525{
5526 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5527
5528#if defined(CONFIG_DEBUG_FS)
5529 if (!amdgpu_sriov_vf(adev))
5530 cancel_work(&adev->reset_work);
5531#endif
5532
5533 if (adev->kfd.dev)
5534 cancel_work(&adev->kfd.reset_work);
5535
5536 if (amdgpu_sriov_vf(adev))
5537 cancel_work(&adev->virt.flr_work);
5538
5539 if (con && adev->ras_enabled)
5540 cancel_work(&con->recovery_work);
5541
5542}
5543
26bc5340 5544/**
6e9c65f7 5545 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
26bc5340 5546 *
982a820b 5547 * @adev: amdgpu_device pointer
26bc5340 5548 * @job: which job trigger hang
80bd2de1 5549 * @reset_context: amdgpu reset context pointer
26bc5340
AG
5550 *
5551 * Attempt to reset the GPU if it has hung (all ASICs).
5552 * Attempt to do soft reset or full reset and reinitialize the ASIC.
5553 * Returns 0 for success or an error on failure.
5554 */
5555
cf727044 5556int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
f1549c09
LG
5557 struct amdgpu_job *job,
5558 struct amdgpu_reset_context *reset_context)
26bc5340 5559{
1d721ed6 5560 struct list_head device_list, *device_list_handle = NULL;
7dd8c205 5561 bool job_signaled = false;
26bc5340 5562 struct amdgpu_hive_info *hive = NULL;
26bc5340 5563 struct amdgpu_device *tmp_adev = NULL;
1d721ed6 5564 int i, r = 0;
bb5c7235 5565 bool need_emergency_restart = false;
3f12acc8 5566 bool audio_suspended = false;
26bc5340 5567
6e3cd2a9 5568 /*
bb5c7235
WS
5569 * Special case: RAS triggered and full reset isn't supported
5570 */
5571 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5572
d5ea093e
AG
5573 /*
5574 * Flush RAM to disk so that after reboot
5575 * the user can read the log and see why the system rebooted.
5576 */
80285ae1
SY
5577 if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
5578 amdgpu_ras_get_context(adev)->reboot) {
d5ea093e
AG
5579 DRM_WARN("Emergency reboot.");
5580
5581 ksys_sync_helper();
5582 emergency_restart();
5583 }
5584
b823821f 5585 dev_info(adev->dev, "GPU %s begin!\n",
bb5c7235 5586 need_emergency_restart ? "jobs stop":"reset");
26bc5340 5587
175ac6ec
ZL
5588 if (!amdgpu_sriov_vf(adev))
5589 hive = amdgpu_get_xgmi_hive(adev);
681260df 5590 if (hive)
53b3f8f4 5591 mutex_lock(&hive->hive_lock);
26bc5340 5592
f1549c09
LG
5593 reset_context->job = job;
5594 reset_context->hive = hive;
9e94d22c
EQ
5595 /*
5596 * Build list of devices to reset.
5597 * In case we are in XGMI hive mode, re-sort the device list
5598 * to put adev in the 1st position.
5599 */
5600 INIT_LIST_HEAD(&device_list);
175ac6ec 5601 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
83d29a5f 5602 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
655ce9cb 5603 list_add_tail(&tmp_adev->reset_list, &device_list);
087a3e13 5604 if (adev->shutdown)
83d29a5f
YC
5605 tmp_adev->shutdown = true;
5606 }
655ce9cb 5607 if (!list_is_first(&adev->reset_list, &device_list))
5608 list_rotate_to_front(&adev->reset_list, &device_list);
5609 device_list_handle = &device_list;
26bc5340 5610 } else {
655ce9cb 5611 list_add_tail(&adev->reset_list, &device_list);
26bc5340
AG
5612 device_list_handle = &device_list;
5613 }
5614
e923be99
AG
5615 /* We need to lock reset domain only once both for XGMI and single device */
5616 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5617 reset_list);
3675c2f2 5618 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
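/*
 * Devices in an XGMI hive share a single reset_domain, so the lock
 * taken above serializes recovery across the whole hive as well as
 * in the single-device case.
 */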
e923be99 5619
1d721ed6 5620 /* block all schedulers and reset given job's ring */
655ce9cb 5621 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
f287a3c5 5622
e923be99 5623 amdgpu_device_set_mp1_state(tmp_adev);
f287a3c5 5624
3f12acc8
EQ
5625 /*
5626 * Try to put the audio codec into suspend state
5627 * before gpu reset started.
5628 *
5629 * The power domain of the graphics device
5630 * is shared with the AZ power domain. Without this,
5631 * we may change the audio hardware from behind
5632 * the audio driver's back. That will trigger
5633 * some audio codec errors.
5634 */
5635 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5636 audio_suspended = true;
5637
9e94d22c
EQ
5638 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5639
52fb44cf
EQ
5640 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5641
c004d44e 5642 if (!amdgpu_sriov_vf(tmp_adev))
428890a3 5643 amdgpu_amdkfd_pre_reset(tmp_adev);
9e94d22c 5644
12ffa55d
AG
5645 /*
5646 * Mark these ASICs to be reset as untracked first,
5647 * and add them back after the reset completes
5648 */
5649 amdgpu_unregister_gpu_instance(tmp_adev);
5650
163d4cd2 5651 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
565d1941 5652
f1c1314b 5653 /* disable ras on ALL IPs */
bb5c7235 5654 if (!need_emergency_restart &&
b823821f 5655 amdgpu_device_ip_need_full_reset(tmp_adev))
f1c1314b 5656 amdgpu_ras_suspend(tmp_adev);
5657
1d721ed6
AG
5658 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5659 struct amdgpu_ring *ring = tmp_adev->rings[i];
5660
35963cf2 5661 if (!ring || !drm_sched_wqueue_ready(&ring->sched))
1d721ed6
AG
5662 continue;
5663
0b2d2c2e 5664 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
7c6e68c7 5665
bb5c7235 5666 if (need_emergency_restart)
7c6e68c7 5667 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
1d721ed6 5668 }
8f8c80f4 5669 atomic_inc(&tmp_adev->gpu_reset_counter);
1d721ed6
AG
5670 }
5671
bb5c7235 5672 if (need_emergency_restart)
7c6e68c7
AG
5673 goto skip_sched_resume;
5674
1d721ed6
AG
5675 /*
5676 * Must check guilty signal here since after this point all old
5677 * HW fences are force signaled.
5678 *
5679 * job->base holds a reference to parent fence
5680 */
f6a3f660 5681 if (job && dma_fence_is_signaled(&job->hw_fence)) {
1d721ed6 5682 job_signaled = true;
1d721ed6
AG
5683 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5684 goto skip_hw_reset;
5685 }
5686
26bc5340 5687retry: /* Pre-ASIC reset for the rest of the adevs in the XGMI hive. */
655ce9cb 5688 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
f1549c09 5689 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
26bc5340
AG
5690 /* TODO: Should we stop? */
5691 if (r) {
aac89168 5692 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
4a580877 5693 r, adev_to_drm(tmp_adev)->unique);
26bc5340
AG
5694 tmp_adev->asic_reset_res = r;
5695 }
247c7b0d
AG
5696
5697 /*
5698 * Drop all pending non scheduler resets. Scheduler resets
5699 * were already dropped during drm_sched_stop
5700 */
d193b12b 5701 amdgpu_device_stop_pending_resets(tmp_adev);
26bc5340
AG
5702 }
5703
5704 /* Actual ASIC resets if needed.*/
4f30d920 5705 /* Host driver will handle XGMI hive reset for SRIOV */
26bc5340
AG
5706 if (amdgpu_sriov_vf(adev)) {
5707 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5708 if (r)
5709 adev->asic_reset_res = r;
950d6425 5710
28606c4e 5711 /* Aldebaran and gfx_11_0_3 support RAS in SRIOV, so we need to resume RAS during reset */
4e8303cf
LL
5712 if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
5713 IP_VERSION(9, 4, 2) ||
5714 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
950d6425 5715 amdgpu_ras_resume(adev);
26bc5340 5716 } else {
f1549c09 5717 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
b98a1648 5718 if (r && r == -EAGAIN)
26bc5340
AG
5719 goto retry;
5720 }
5721
1d721ed6
AG
5722skip_hw_reset:
5723
26bc5340 5724 /* Post ASIC reset for all devs .*/
655ce9cb 5725 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
7c6e68c7 5726
1d721ed6
AG
5727 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5728 struct amdgpu_ring *ring = tmp_adev->rings[i];
5729
35963cf2 5730 if (!ring || !drm_sched_wqueue_ready(&ring->sched))
1d721ed6
AG
5731 continue;
5732
6868a2c4 5733 drm_sched_start(&ring->sched, true);
1d721ed6
AG
5734 }
5735
b8920e1e 5736 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
4a580877 5737 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
1d721ed6 5738
7258fa31
SK
5739 if (tmp_adev->asic_reset_res)
5740 r = tmp_adev->asic_reset_res;
5741
1d721ed6 5742 tmp_adev->asic_reset_res = 0;
26bc5340
AG
5743
5744 if (r) {
5745 /* bad news, how to tell it to userspace ? */
12ffa55d 5746 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
26bc5340
AG
5747 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5748 } else {
12ffa55d 5749 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
3fa8f89d
S
5750 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5751 DRM_WARN("smart shift update failed\n");
26bc5340 5752 }
7c6e68c7 5753 }
26bc5340 5754
7c6e68c7 5755skip_sched_resume:
655ce9cb 5756 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
428890a3 5757 /* unlock kfd: SRIOV would do it separately */
c004d44e 5758 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
428890a3 5759 amdgpu_amdkfd_post_reset(tmp_adev);
8e2712e7 5760
5761 /* kfd_post_reset will do nothing if kfd device is not initialized,
5762 * need to bring up kfd here if it was not initialized before
5763 */
5764 if (!adev->kfd.init_complete)
5765 amdgpu_amdkfd_device_init(adev);
5766
3f12acc8
EQ
5767 if (audio_suspended)
5768 amdgpu_device_resume_display_audio(tmp_adev);
e923be99
AG
5769
5770 amdgpu_device_unset_mp1_state(tmp_adev);
d293470e
YC
5771
5772 amdgpu_ras_set_error_query_ready(tmp_adev, true);
26bc5340
AG
5773 }
5774
e923be99
AG
5775 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5776 reset_list);
5777 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5778
9e94d22c 5779 if (hive) {
9e94d22c 5780 mutex_unlock(&hive->hive_lock);
d95e8e97 5781 amdgpu_put_xgmi_hive(hive);
9e94d22c 5782 }
26bc5340 5783
f287a3c5 5784 if (r)
26bc5340 5785 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
ab9a0b1f
AG
5786
5787 atomic_set(&adev->reset_domain->reset_res, r);
d38ceaf9
AD
5788 return r;
5789}
5790
466a7d11
ML
5791/**
5792 * amdgpu_device_partner_bandwidth - find the bandwidth of appropriate partner
5793 *
5794 * @adev: amdgpu_device pointer
5795 * @speed: pointer to the speed of the link
5796 * @width: pointer to the width of the link
5797 *
5798 * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
5799 * first physical partner to an AMD dGPU.
5800 * This will exclude any virtual switches and links.
5801 */
5802static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
5803 enum pci_bus_speed *speed,
5804 enum pcie_link_width *width)
5805{
5806 struct pci_dev *parent = adev->pdev;
5807
5808 if (!speed || !width)
5809 return;
5810
5811 *speed = PCI_SPEED_UNKNOWN;
5812 *width = PCIE_LNK_WIDTH_UNKNOWN;
5813
5814 while ((parent = pci_upstream_bridge(parent))) {
5815 /* skip upstream/downstream switches internal to the dGPU */
5816 if (parent->vendor == PCI_VENDOR_ID_ATI)
5817 continue;
5818 *speed = pcie_get_speed_cap(parent);
5819 *width = pcie_get_width_cap(parent);
5820 break;
5821 }
5822}
5823
e3ecdffa
AD
5824/**
5825 * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIE slot
5826 *
5827 * @adev: amdgpu_device pointer
5828 *
5829 * Fetches and stores in the driver the PCIE capabilities (gen speed
5830 * and lanes) of the slot the device is in. Handles APUs and
5831 * virtualized environments where PCIE config space may not be available.
5832 */
5494d864 5833static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
d0dd7f0c 5834{
5d9a6330 5835 struct pci_dev *pdev;
c5313457
HK
5836 enum pci_bus_speed speed_cap, platform_speed_cap;
5837 enum pcie_link_width platform_link_width;
d0dd7f0c 5838
cd474ba0
AD
5839 if (amdgpu_pcie_gen_cap)
5840 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 5841
cd474ba0
AD
5842 if (amdgpu_pcie_lane_cap)
5843 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 5844
cd474ba0 5845 /* covers APUs as well */
04e85958 5846 if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
cd474ba0
AD
5847 if (adev->pm.pcie_gen_mask == 0)
5848 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5849 if (adev->pm.pcie_mlw_mask == 0)
5850 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 5851 return;
cd474ba0 5852 }
d0dd7f0c 5853
c5313457
HK
5854 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5855 return;
5856
466a7d11
ML
5857 amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
5858 &platform_link_width);
c5313457 5859
cd474ba0 5860 if (adev->pm.pcie_gen_mask == 0) {
5d9a6330
AD
5861 /* asic caps */
5862 pdev = adev->pdev;
5863 speed_cap = pcie_get_speed_cap(pdev);
5864 if (speed_cap == PCI_SPEED_UNKNOWN) {
5865 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
cd474ba0
AD
5866 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5867 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
cd474ba0 5868 } else {
2b3a1f51
FX
5869 if (speed_cap == PCIE_SPEED_32_0GT)
5870 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5871 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5872 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5873 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5874 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5875 else if (speed_cap == PCIE_SPEED_16_0GT)
5d9a6330
AD
5876 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5877 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5878 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5879 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5880 else if (speed_cap == PCIE_SPEED_8_0GT)
5881 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5882 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5883 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5884 else if (speed_cap == PCIE_SPEED_5_0GT)
5885 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5886 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5887 else
5888 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5889 }
5890 /* platform caps */
c5313457 5891 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5d9a6330
AD
5892 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5893 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5894 } else {
2b3a1f51
FX
5895 if (platform_speed_cap == PCIE_SPEED_32_0GT)
5896 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5897 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5898 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5899 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5900 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5901 else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5d9a6330
AD
5902 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5903 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5904 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5905 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
c5313457 5906 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5d9a6330
AD
5907 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5908 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5909 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
c5313457 5910 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5d9a6330
AD
5911 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5912 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5913 else
5914 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5915
cd474ba0
AD
5916 }
5917 }
5918 if (adev->pm.pcie_mlw_mask == 0) {
c5313457 5919 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5d9a6330
AD
5920 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5921 } else {
c5313457 5922 switch (platform_link_width) {
5d9a6330 5923 case PCIE_LNK_X32:
cd474ba0
AD
5924 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5925 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5926 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5927 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5928 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5929 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5930 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5931 break;
5d9a6330 5932 case PCIE_LNK_X16:
cd474ba0
AD
5933 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5934 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5935 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5936 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5937 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5938 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5939 break;
5d9a6330 5940 case PCIE_LNK_X12:
cd474ba0
AD
5941 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5942 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5943 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5944 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5945 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5946 break;
5d9a6330 5947 case PCIE_LNK_X8:
cd474ba0
AD
5948 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5949 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5950 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5951 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5952 break;
5d9a6330 5953 case PCIE_LNK_X4:
cd474ba0
AD
5954 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5955 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5956 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5957 break;
5d9a6330 5958 case PCIE_LNK_X2:
cd474ba0
AD
5959 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5960 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5961 break;
5d9a6330 5962 case PCIE_LNK_X1:
cd474ba0
AD
5963 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5964 break;
5965 default:
5966 break;
5967 }
d0dd7f0c
AD
5968 }
5969 }
5970}
d38ceaf9 5971
08a2fd23
RE
5972/**
5973 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5974 *
5975 * @adev: amdgpu_device pointer
5976 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5977 *
5978 * Return true if @peer_adev can access (DMA) @adev through the PCIe
5979 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5980 * @peer_adev.
5981 */
5982bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5983 struct amdgpu_device *peer_adev)
5984{
5985#ifdef CONFIG_HSA_AMD_P2P
5986 uint64_t address_mask = peer_adev->dev->dma_mask ?
5987 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5988 resource_size_t aper_limit =
5989 adev->gmc.aper_base + adev->gmc.aper_size - 1;
bb66ecbf
LL
5990 bool p2p_access =
5991 !adev->gmc.xgmi.connected_to_cpu &&
5992 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
08a2fd23
RE
5993
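/*
 * Peer access additionally requires the pcie_p2p module option,
 * a fully CPU-visible VRAM BAR (large BAR), and an aperture that
 * fits entirely inside the peer's DMA mask.
 */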
5994 return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5995 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5996 !(adev->gmc.aper_base & address_mask ||
5997 aper_limit & address_mask));
5998#else
5999 return false;
6000#endif
6001}
6002
361dbd01
AD
6003int amdgpu_device_baco_enter(struct drm_device *dev)
6004{
1348969a 6005 struct amdgpu_device *adev = drm_to_adev(dev);
7a22677b 6006 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
361dbd01 6007
6ab68650 6008 if (!amdgpu_device_supports_baco(dev))
361dbd01
AD
6009 return -ENOTSUPP;
6010
8ab0d6f0 6011 if (ras && adev->ras_enabled &&
acdae216 6012 adev->nbio.funcs->enable_doorbell_interrupt)
7a22677b
LM
6013 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
6014
9530273e 6015 return amdgpu_dpm_baco_enter(adev);
361dbd01
AD
6016}
6017
6018int amdgpu_device_baco_exit(struct drm_device *dev)
6019{
1348969a 6020 struct amdgpu_device *adev = drm_to_adev(dev);
7a22677b 6021 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
9530273e 6022 int ret = 0;
361dbd01 6023
6ab68650 6024 if (!amdgpu_device_supports_baco(dev))
361dbd01
AD
6025 return -ENOTSUPP;
6026
9530273e
EQ
6027 ret = amdgpu_dpm_baco_exit(adev);
6028 if (ret)
6029 return ret;
7a22677b 6030
8ab0d6f0 6031 if (ras && adev->ras_enabled &&
acdae216 6032 adev->nbio.funcs->enable_doorbell_interrupt)
7a22677b
LM
6033 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
6034
1bece222
CL
6035 if (amdgpu_passthrough(adev) &&
6036 adev->nbio.funcs->clear_doorbell_interrupt)
6037 adev->nbio.funcs->clear_doorbell_interrupt(adev);
6038
7a22677b 6039 return 0;
361dbd01 6040}
c9a6b82f
AG
6041
6042/**
6043 * amdgpu_pci_error_detected - Called when a PCI error is detected.
6044 * @pdev: PCI device struct
6045 * @state: PCI channel state
6046 *
6047 * Description: Called when a PCI error is detected.
6048 *
6049 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
6050 */
6051pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6052{
6053 struct drm_device *dev = pci_get_drvdata(pdev);
6054 struct amdgpu_device *adev = drm_to_adev(dev);
acd89fca 6055 int i;
c9a6b82f
AG
6056
6057 DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
6058
6894305c
AG
6059 if (adev->gmc.xgmi.num_physical_nodes > 1) {
6060 DRM_WARN("No support for XGMI hive yet...");
6061 return PCI_ERS_RESULT_DISCONNECT;
6062 }
6063
e17e27f9
GC
6064 adev->pci_channel_state = state;
6065
c9a6b82f
AG
6066 switch (state) {
6067 case pci_channel_io_normal:
6068 return PCI_ERS_RESULT_CAN_RECOVER;
acd89fca 6069 /* Fatal error, prepare for slot reset */
8a11d283
TZ
6070 case pci_channel_io_frozen:
6071 /*
d0fb18b5 6072 * Locking adev->reset_domain->sem will prevent any external access
acd89fca
AG
6073 * to GPU during PCI error recovery
6074 */
3675c2f2 6075 amdgpu_device_lock_reset_domain(adev->reset_domain);
e923be99 6076 amdgpu_device_set_mp1_state(adev);
acd89fca
AG
6077
6078 /*
6079 * Block any work scheduling as we do for regular GPU reset
6080 * for the duration of the recovery
6081 */
6082 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6083 struct amdgpu_ring *ring = adev->rings[i];
6084
35963cf2 6085 if (!ring || !drm_sched_wqueue_ready(&ring->sched))
acd89fca
AG
6086 continue;
6087
6088 drm_sched_stop(&ring->sched, NULL);
6089 }
8f8c80f4 6090 atomic_inc(&adev->gpu_reset_counter);
c9a6b82f
AG
6091 return PCI_ERS_RESULT_NEED_RESET;
6092 case pci_channel_io_perm_failure:
6093 /* Permanent error, prepare for device removal */
6094 return PCI_ERS_RESULT_DISCONNECT;
6095 }
6096
6097 return PCI_ERS_RESULT_NEED_RESET;
6098}
6099
6100/**
6101 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
6102 * @pdev: pointer to PCI device
6103 */
6104pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
6105{
6106
6107 DRM_INFO("PCI error: mmio enabled callback!!\n");
6108
6109 /* TODO - dump whatever for debugging purposes */
6110
6111 /* This called only if amdgpu_pci_error_detected returns
6112 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
6113 * works, no need to reset slot.
6114 */
6115
6116 return PCI_ERS_RESULT_RECOVERED;
6117}
6118
6119/**
6120 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
6121 * @pdev: PCI device struct
6122 *
6123 * Description: This routine is called by the pci error recovery
6124 * code after the PCI slot has been reset, just before we
6125 * should resume normal operations.
6126 */
6127pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
6128{
6129 struct drm_device *dev = pci_get_drvdata(pdev);
6130 struct amdgpu_device *adev = drm_to_adev(dev);
362c7b91 6131 int r, i;
04442bf7 6132 struct amdgpu_reset_context reset_context;
362c7b91 6133 u32 memsize;
7ac71382 6134 struct list_head device_list;
c9a6b82f
AG
6135
6136 DRM_INFO("PCI error: slot reset callback!!\n");
6137
04442bf7
LL
6138 memset(&reset_context, 0, sizeof(reset_context));
6139
7ac71382 6140 INIT_LIST_HEAD(&device_list);
655ce9cb 6141 list_add_tail(&adev->reset_list, &device_list);
7ac71382 6142
362c7b91
AG
6143 /* wait for asic to come out of reset */
6144 msleep(500);
6145
7ac71382 6146 /* Restore PCI confspace */
c1dd4aa6 6147 amdgpu_device_load_pci_state(pdev);
c9a6b82f 6148
362c7b91
AG
6149 /* confirm ASIC came out of reset */
6150 for (i = 0; i < adev->usec_timeout; i++) {
6151 memsize = amdgpu_asic_get_config_memsize(adev);
6152
6153 if (memsize != 0xffffffff)
6154 break;
6155 udelay(1);
6156 }
6157 if (memsize == 0xffffffff) {
6158 r = -ETIME;
6159 goto out;
6160 }
6161
04442bf7
LL
6162 reset_context.method = AMD_RESET_METHOD_NONE;
6163 reset_context.reset_req_dev = adev;
6164 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
6165 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
6166
7afefb81 6167 adev->no_hw_access = true;
04442bf7 6168 r = amdgpu_device_pre_asic_reset(adev, &reset_context);
7afefb81 6169 adev->no_hw_access = false;
c9a6b82f
AG
6170 if (r)
6171 goto out;
6172
04442bf7 6173 r = amdgpu_do_asic_reset(&device_list, &reset_context);
c9a6b82f
AG
6174
6175out:
c9a6b82f 6176 if (!r) {
c1dd4aa6
AG
6177 if (amdgpu_device_cache_pci_state(adev->pdev))
6178 pci_restore_state(adev->pdev);
6179
c9a6b82f
AG
6180 DRM_INFO("PCIe error recovery succeeded\n");
6181 } else {
6182 DRM_ERROR("PCIe error recovery failed, err:%d", r);
e923be99
AG
6183 amdgpu_device_unset_mp1_state(adev);
6184 amdgpu_device_unlock_reset_domain(adev->reset_domain);
c9a6b82f
AG
6185 }
6186
6187 return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
6188}
6189
6190/**
6191 * amdgpu_pci_resume() - resume normal ops after PCI reset
6192 * @pdev: pointer to PCI device
6193 *
6194 * Called when the error recovery driver tells us that it's
505199a3 6195 * OK to resume normal operation.
c9a6b82f
AG
6196 */
6197void amdgpu_pci_resume(struct pci_dev *pdev)
6198{
6199 struct drm_device *dev = pci_get_drvdata(pdev);
6200 struct amdgpu_device *adev = drm_to_adev(dev);
acd89fca 6201 int i;
c9a6b82f 6202
c9a6b82f
AG
6203
6204 DRM_INFO("PCI error: resume callback!!\n");
acd89fca 6205
e17e27f9
GC
6206 /* Only continue execution for the case of pci_channel_io_frozen */
6207 if (adev->pci_channel_state != pci_channel_io_frozen)
6208 return;
6209
acd89fca
AG
6210 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6211 struct amdgpu_ring *ring = adev->rings[i];
6212
35963cf2 6213 if (!ring || !drm_sched_wqueue_ready(&ring->sched))
acd89fca
AG
6214 continue;
6215
acd89fca
AG
6216 drm_sched_start(&ring->sched, true);
6217 }
6218
e923be99
AG
6219 amdgpu_device_unset_mp1_state(adev);
6220 amdgpu_device_unlock_reset_domain(adev->reset_domain);
c9a6b82f 6221}
c1dd4aa6
AG
6222
6223bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
6224{
6225 struct drm_device *dev = pci_get_drvdata(pdev);
6226 struct amdgpu_device *adev = drm_to_adev(dev);
6227 int r;
6228
6229 r = pci_save_state(pdev);
6230 if (!r) {
6231 kfree(adev->pci_state);
6232
6233 adev->pci_state = pci_store_saved_state(pdev);
6234
6235 if (!adev->pci_state) {
6236 DRM_ERROR("Failed to store PCI saved state");
6237 return false;
6238 }
6239 } else {
6240 DRM_WARN("Failed to save PCI state, err:%d\n", r);
6241 return false;
6242 }
6243
6244 return true;
6245}
6246
6247bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
6248{
6249 struct drm_device *dev = pci_get_drvdata(pdev);
6250 struct amdgpu_device *adev = drm_to_adev(dev);
6251 int r;
6252
6253 if (!adev->pci_state)
6254 return false;
6255
6256 r = pci_load_saved_state(pdev, adev->pci_state);
6257
6258 if (!r) {
6259 pci_restore_state(pdev);
6260 } else {
6261 DRM_WARN("Failed to load PCI state, err:%d\n", r);
6262 return false;
6263 }
6264
6265 return true;
6266}
6267
810085dd
EH
6268void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6269 struct amdgpu_ring *ring)
6270{
6271#ifdef CONFIG_X86_64
b818a5d3 6272 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
810085dd
EH
6273 return;
6274#endif
6275 if (adev->gmc.xgmi.connected_to_cpu)
6276 return;
6277
6278 if (ring && ring->funcs->emit_hdp_flush)
6279 amdgpu_ring_emit_hdp_flush(ring);
6280 else
6281 amdgpu_asic_flush_hdp(adev, ring);
6282}
c1dd4aa6 6283
810085dd
EH
6284void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6285 struct amdgpu_ring *ring)
6286{
6287#ifdef CONFIG_X86_64
b818a5d3 6288 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
810085dd
EH
6289 return;
6290#endif
6291 if (adev->gmc.xgmi.connected_to_cpu)
6292 return;
c1dd4aa6 6293
810085dd
EH
6294 amdgpu_asic_invalidate_hdp(adev, ring);
6295}
34f3a4a9 6296
89a7a870
AG
6297int amdgpu_in_reset(struct amdgpu_device *adev)
6298{
6299 return atomic_read(&adev->reset_domain->in_gpu_reset);
53a17b6b
TZ
6300}
6301
34f3a4a9
LY
6302/**
6303 * amdgpu_device_halt() - bring hardware to some kind of halt state
6304 *
6305 * @adev: amdgpu_device pointer
6306 *
6307 * Bring hardware to some kind of halt state so that no one can touch it
6308 * any more. It helps to maintain the error context when an error occurs.
6309 * Compared to a simple hang, the system will stay stable at least for SSH
6310 * access. Then it should be trivial to inspect the hardware state and
6311 * see what's going on. Implemented as follows:
6312 *
6313 * 1. drm_dev_unplug() makes device inaccessible to user space(IOCTLs, etc),
6314 * clears all CPU mappings to device, disallows remappings through page faults
6315 * 2. amdgpu_irq_disable_all() disables all interrupts
6316 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
6317 * 4. set adev->no_hw_access to avoid potential crashes after step 5
6318 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
6319 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
6320 * flush any in flight DMA operations
6321 */
6322void amdgpu_device_halt(struct amdgpu_device *adev)
6323{
6324 struct pci_dev *pdev = adev->pdev;
e0f943b4 6325 struct drm_device *ddev = adev_to_drm(adev);
34f3a4a9 6326
2c1c7ba4 6327 amdgpu_xcp_dev_unplug(adev);
34f3a4a9
LY
6328 drm_dev_unplug(ddev);
6329
6330 amdgpu_irq_disable_all(adev);
6331
6332 amdgpu_fence_driver_hw_fini(adev);
6333
6334 adev->no_hw_access = true;
6335
6336 amdgpu_device_unmap_mmio(adev);
6337
6338 pci_disable_device(pdev);
6339 pci_wait_for_pending_transaction(pdev);
6340}
86700a40
XD
6341
6342u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
6343 u32 reg)
6344{
6345 unsigned long flags, address, data;
6346 u32 r;
6347
6348 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6349 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6350
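/*
 * Indirect register access: write the dword index to the NBIO index
 * port, read it back to post the write, then access the data port,
 * all under the pcie_idx lock.
 */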
6351 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6352 WREG32(address, reg * 4);
6353 (void)RREG32(address);
6354 r = RREG32(data);
6355 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6356 return r;
6357}
6358
6359void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6360 u32 reg, u32 v)
6361{
6362 unsigned long flags, address, data;
6363
6364 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6365 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6366
6367 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6368 WREG32(address, reg * 4);
6369 (void)RREG32(address);
6370 WREG32(data, v);
6371 (void)RREG32(data);
6372 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6373}
68ce8b24
CK
6374
6375/**
6376 * amdgpu_device_switch_gang - switch to a new gang
6377 * @adev: amdgpu_device pointer
6378 * @gang: the gang to switch to
6379 *
6380 * Try to switch to a new gang.
6381 * Returns: NULL if we switched to the new gang or a reference to the current
6382 * gang leader.
6383 */
6384struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6385 struct dma_fence *gang)
6386{
6387 struct dma_fence *old = NULL;
6388
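/*
 * Lockless update: re-read the current gang leader under RCU and only
 * install the new gang via cmpxchg once the old leader has signaled;
 * otherwise return the still-running leader to the caller.
 */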
6389 do {
6390 dma_fence_put(old);
6391 rcu_read_lock();
6392 old = dma_fence_get_rcu_safe(&adev->gang_submit);
6393 rcu_read_unlock();
6394
6395 if (old == gang)
6396 break;
6397
6398 if (!dma_fence_is_signaled(old))
6399 return old;
6400
6401 } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6402 old, gang) != old);
6403
6404 dma_fence_put(old);
6405 return NULL;
6406}
220c8cc8
AD
6407
6408bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6409{
6410 switch (adev->asic_type) {
6411#ifdef CONFIG_DRM_AMDGPU_SI
6412 case CHIP_HAINAN:
6413#endif
6414 case CHIP_TOPAZ:
6415 /* chips with no display hardware */
6416 return false;
6417#ifdef CONFIG_DRM_AMDGPU_SI
6418 case CHIP_TAHITI:
6419 case CHIP_PITCAIRN:
6420 case CHIP_VERDE:
6421 case CHIP_OLAND:
6422#endif
6423#ifdef CONFIG_DRM_AMDGPU_CIK
6424 case CHIP_BONAIRE:
6425 case CHIP_HAWAII:
6426 case CHIP_KAVERI:
6427 case CHIP_KABINI:
6428 case CHIP_MULLINS:
6429#endif
6430 case CHIP_TONGA:
6431 case CHIP_FIJI:
6432 case CHIP_POLARIS10:
6433 case CHIP_POLARIS11:
6434 case CHIP_POLARIS12:
6435 case CHIP_VEGAM:
6436 case CHIP_CARRIZO:
6437 case CHIP_STONEY:
6438 /* chips with display hardware */
6439 return true;
6440 default:
6441 /* IP discovery */
4e8303cf 6442 if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
220c8cc8
AD
6443 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6444 return false;
6445 return true;
6446 }
6447}
81283fee
JZ
6448
6449uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
6450 uint32_t inst, uint32_t reg_addr, char reg_name[],
6451 uint32_t expected_value, uint32_t mask)
6452{
6453 uint32_t ret = 0;
6454 uint32_t old_ = 0;
6455 uint32_t tmp_ = RREG32(reg_addr);
6456 uint32_t loop = adev->usec_timeout;
6457
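/*
 * The timeout window restarts whenever the register value changes, so
 * 'loop' bounds how long the register may sit unchanged at a value
 * that doesn't match, not the total wait time.
 */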
6458 while ((tmp_ & (mask)) != (expected_value)) {
6459 if (old_ != tmp_) {
6460 loop = adev->usec_timeout;
6461 old_ = tmp_;
6462 } else
6463 udelay(1);
6464 tmp_ = RREG32(reg_addr);
6465 loop--;
6466 if (!loop) {
6467 DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
6468 inst, reg_name, (uint32_t)expected_value,
6469 (uint32_t)(tmp_ & (mask)));
6470 ret = -ETIMEDOUT;
6471 break;
6472 }
6473 }
6474 return ret;
6475}