/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#include <linux/apple-gmux.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/device.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
64 #include "bif/bif_4_1_d.h"
65 #include <linux/firmware.h>
66 #include "amdgpu_vf_error.h"
68 #include "amdgpu_amdkfd.h"
69 #include "amdgpu_pm.h"
71 #include "amdgpu_xgmi.h"
72 #include "amdgpu_ras.h"
73 #include "amdgpu_pmu.h"
74 #include "amdgpu_fru_eeprom.h"
75 #include "amdgpu_reset.h"
76 #include "amdgpu_virt.h"
78 #include <linux/suspend.h>
79 #include <drm/task_barrier.h>
80 #include <linux/pm_runtime.h>
82 #include <drm/drm_drv.h>
#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
#endif
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000
#define AMDGPU_MAX_RETRY_LIMIT		2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
static const struct drm_driver amdgpu_kms_driver;

const char *amdgpu_asic_name[] = {
/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */
static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, 0444,
		amdgpu_device_get_pcie_replay_count, NULL);
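/*
 * Illustrative sketch (not part of the driver): reading this attribute from
 * userspace. The card0 path is an assumption; the actual node depends on the
 * system.
 *
 *	char buf[32];
 *	int fd = open("/sys/class/drm/card0/device/pcie_replay_count", O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);
 *
 *	if (n > 0) {
 *		buf[n] = '\0';
 *		printf("PCIe replays: %s", buf);
 *	}
 *	close(fd);
 */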
static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
					  struct bin_attribute *attr, char *buf,
					  loff_t ppos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t bytes_read;

	switch (ppos) {
	case AMDGPU_SYS_REG_STATE_XGMI:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_WAFL:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_PCIE:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_USR:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_USR_1:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
		break;
	default:
		return -EINVAL;
	}

	return bytes_read;
}

BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
	 AMDGPU_SYS_REG_STATE_END);
int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (!amdgpu_asic_get_reg_state_supported(adev))
		return 0;

	ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);

	return ret;
}

void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_asic_get_reg_state_supported(adev))
		return;
	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
}
/**
 * DOC: board_info
 *
 * The amdgpu driver provides a sysfs API for giving board related information.
 * It provides the form factor information in the format
 *
 *   type : form factor
 *
 * Possible form factor values
 *
 * - "cem"     - PCIE CEM card
 * - "oam"     - Open Compute Accelerator Module
 * - "unknown" - Not known
 */
static ssize_t amdgpu_device_get_board_info(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM;
	const char *pkg;

	if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
		pkg_type = adev->smuio.funcs->get_pkg_type(adev);

	switch (pkg_type) {
	case AMDGPU_PKG_TYPE_CEM:
		pkg = "cem";
		break;
	case AMDGPU_PKG_TYPE_OAM:
		pkg = "oam";
		break;
	default:
		pkg = "unknown";
		break;
	}

	return sysfs_emit(buf, "%s : %s\n", "type", pkg);
}

static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL);
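/*
 * Assumed example output (derived from the sysfs_emit() format string above):
 * reading board_info on an OAM package would return "type : oam", and
 * "type : cem" on a standard add-in card.
 */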
static struct attribute *amdgpu_board_attrs[] = {
	&dev_attr_board_info.attr,
	NULL,
};

static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj,
					     struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (adev->flags & AMD_IS_APU)
		return 0;

	return attr->mode;
}

static const struct attribute_group amdgpu_board_attrs_group = {
	.attrs = amdgpu_board_attrs,
	.is_visible = amdgpu_board_attrs_is_visible
};

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}
/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}
/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise returns false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	return amdgpu_asic_supports_baco(adev);
}
/**
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * Smart Shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
	return (amdgpu_device_supports_boco(dev) &&
		amdgpu_acpi_is_power_shift_control_supported());
}
/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}
	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}
/**
 * amdgpu_device_aper_access - access vram by the vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * The return value means how many bytes have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			/* Make sure HDP write cache flush happens without any reordering
			 * after the system memory contents are sent over PCIe device
			 */
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			/* Make sure HDP read cache is invalidated before issuing a read
			 * to the PCIe device
			 */
			mb();
			memcpy_fromio(buf, addr, count);
		}
	}

	return count;
}
/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try to use the vram aperture to access vram first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* use MM to access the rest of vram */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}
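/*
 * Usage sketch (illustrative, not from the driver): read the first 16 dwords
 * of VRAM into a stack buffer; offset 0 is an arbitrary example. The helper
 * transparently falls back from the aperture to MM_INDEX/MM_DATA access.
 *
 *	uint32_t data[16];
 *
 *	amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
 */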
/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (down_read_trylock(&adev->reset_domain->sem))
		up_read(&adev->reset_domain->sem);
	else
		lockdep_assert_held(&adev->reset_domain->sem);
#endif
	return false;
}
/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg, 0);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}
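/*
 * Usage sketch: most callers do not invoke this helper directly but go
 * through the RREG32()/WREG32() register macros, which (as an illustration,
 * assuming the usual amdgpu.h definitions) expand roughly to
 *
 *	uint32_t val = amdgpu_device_rreg(adev, reg_offset, 0);
 *	amdgpu_device_wreg(adev, reg_offset, val | 0x1, 0);
 */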
/*
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}
/**
 * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 * @xcc_id: xcc accelerated compute core id
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
				uint32_t reg, uint32_t acc_flags,
				uint32_t xcc_id)
{
	uint32_t ret, rlcg_flag;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    adev->gfx.rlc.rlcg_reg_access_supported &&
		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
							 GC_HWIP, false,
							 &rlcg_flag)) {
			ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, xcc_id);
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	return ret;
}
/*
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}
/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v, 0);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}
/**
 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 * @xcc_id: xcc accelerated compute core id
 *
 * This function is invoked only for the debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v,
			     uint32_t xcc_id)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
	} else if ((reg * 4) >= adev->rmmio_size) {
		adev->pcie_wreg(adev, reg * 4, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}
/**
 * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 * @xcc_id: xcc accelerated compute core id
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t v,
			    uint32_t acc_flags, uint32_t xcc_id)
{
	uint32_t rlcg_flag;

	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    adev->gfx.rlc.rlcg_reg_access_supported &&
		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
							 GC_HWIP, true,
							 &rlcg_flag)) {
			amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, xcc_id);
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v, xcc_id);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}
}
/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	u32 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}
u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
				    u64 reg_addr)
{
	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
	else
		pcie_index_hi = 0;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r = readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}
/**
 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	u64 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}
u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
				      u64 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	unsigned long pcie_index_hi = 0;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;
	u64 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r |= ((u64)readl(pcie_data_offset) << 32);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}
/**
 * amdgpu_device_indirect_wreg - write an indirect register address
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
				     u64 reg_addr, u32 reg_data)
{
	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
	else
		pcie_index_hi = 0;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
/**
 * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register address
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
				       u64 reg_addr, u64 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	unsigned long pcie_index_hi = 0;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
/**
 * amdgpu_device_get_rev_id - query device rev_id
 *
 * @adev: amdgpu_device pointer
 *
 * Return device rev_id
 */
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
	BUG();
	return 0;
}
/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
		  reg, v);
	BUG();
}
/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%llX\n", reg);
	BUG();
	return 0;
}
/**
 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
		  reg, v);
	BUG();
}
/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}
/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}
/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	int ret;

	amdgpu_asic_pre_asic_init(adev);

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		amdgpu_psp_wait_for_bootloader(adev);
		ret = amdgpu_atomfirmware_asic_init(adev, true);
		return ret;
	} else {
		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
	}
}
/**
 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &adev->mem_scratch.robj,
				       &adev->mem_scratch.gpu_addr,
				       (void **)&adev->mem_scratch.ptr);
}

/**
 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
}
/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
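/*
 * Usage sketch (register offsets and masks below are made up for
 * illustration): the table is consumed as (reg, and_mask, or_mask) triples.
 * An and_mask of 0xffffffff writes or_mask directly; anything else is a
 * read-modify-write of the masked bits.
 *
 *	static const u32 example_golden_regs[] = {
 *		0x1234, 0xffffffff, 0x00000001,
 *		0x5678, 0x0000ff00, 0x00002100,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_regs,
 *						ARRAY_SIZE(example_golden_regs));
 */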
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}
/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}
/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}
/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
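/*
 * Usage sketch (illustrative): a caller grabs a slot, accesses it through
 * adev->wb.wb (CPU side) or adev->wb.gpu_addr (GPU side), and releases it
 * when done. The returned index is a dword offset into the WB page.
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		u32 *cpu_addr = &adev->wb.wb[wb];
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */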
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the size we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	unsigned int i;
	u16 cmd;
	int r;

	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		return 0;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
			rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}
static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
{
	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
		return false;

	return true;
}
/*
 * GPU helper functions.
 */

/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if need or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (!amdgpu_device_read_bios(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still needs the driver to do vPost, otherwise the gpu hangs.
		 * smc fw versions above 22.15 don't have this flaw, so we force
		 * vpost to be executed for smc versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			release_firmware(adev->pm.fw);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset whole hive on init */
	if (adev->gmc.xgmi.pending_reset)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}
/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on DCE 3.0 or later.
 * If users report that it works on older ASICS as well, we may
 * loosen this.
 */
bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
{
	switch (amdgpu_seamless) {
	case -1:
		break;
	case 1:
		return true;
	case 0:
		return false;
	default:
		DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
			  amdgpu_seamless);
		return false;
	}

	if (!(adev->flags & AMD_IS_APU))
		return false;

	if (adev->mman.keep_stolen_vga_memory)
		return false;

	return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
}
/*
 * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
 * don't support dynamic speed switching. Until we have confirmation from Intel
 * that a specific host supports it, it's safer that we keep it disabled for all.
 *
 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
 */
static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
{
#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* eGPUs change speeds based on USB4 fabric conditions */
	if (dev_is_removable(adev->dev))
		return true;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		return false;
#endif
	return true;
}
/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
 * @adev: amdgpu_device pointer
 *
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 *
 * Returns true if it should be used or false if not.
 */
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
	switch (amdgpu_aspm) {
	case -1:
		break;
	case 0:
		return false;
	case 1:
		return true;
	default:
		return false;
	}
	if (adev->flags & AMD_IS_APU)
		return false;
	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
		return false;
	return pcie_aspm_enabled(adev->pdev);
}
/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
		bool state)
{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory: a page is 4KB, so we have 12 bits of offset and a minimum
 * of 9 bits in the page table; the remaining bits go to the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory
	 */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}
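/*
 * Worked example: with the minimum block size of 9, one page table maps
 * 2^(12 + 9) bytes = 2 MB of address space; a block size of 10 doubles that
 * to 4 MB per page table.
 */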
/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}
static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}
static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
	if (!(adev->flags & AMD_IS_APU) ||
	    adev->asic_type < CHIP_RAVEN)
		return -EINVAL;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		break;
	case CHIP_RENOIR:
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case CHIP_YELLOW_CARP:
		break;
	case CHIP_CYAN_SKILLFISH:
		if ((adev->pdev->device == 0x13FE) ||
		    (adev->pdev->device == 0x143F))
			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
		break;
	default:
		break;
	}

	return 0;
}
/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	if (amdgpu_sched_hw_submission < 2) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = 2;
	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
	}

	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
		amdgpu_reset_method = -1;
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	return 0;
}
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		r = pci_enable_device(pdev);
		if (r)
			DRM_WARN("pci_enable_device failed (%d)\n", r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_prepare(dev);
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};
/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}
/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}
/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}
/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}
/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block version is equal or greater,
 * or 1 if it is smaller or the ip_block doesn't exist.
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}
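/*
 * Usage sketch (illustrative): gate a code path on the SMC block being at
 * least version 7.0; a return value of 0 means "equal or greater".
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
 *						7, 0))
 *		dev_info(adev->dev, "SMC IP block is version 7.0 or newer\n");
 */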
/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	switch (ip_block_version->type) {
	case AMD_IP_BLOCK_TYPE_VCN:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
			return 0;
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
			return 0;
		break;
	default:
		break;
	}

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}
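/*
 * Usage sketch: the per-SoC setup code registers its IP blocks in order,
 * e.g. (illustrative, soc15-style)
 *
 *	amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
 *	amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 */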
/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		const char *pci_address_name = pci_name(adev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}
void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
		adev->mode_info.num_crtc = 1;
		adev->enable_virtual_display = true;
		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
			 adev->enable_virtual_display, adev->mode_info.num_crtc);
	}
}
2235 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2237 * @adev: amdgpu_device pointer
2239 * Parses the asic configuration parameters specified in the gpu info
2240 * firmware and makes them available to the driver for use in configuring the asic.
2242 * Returns 0 on success, -EINVAL on failure.
2244 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
2246 const char *chip_name;
2249 const struct gpu_info_firmware_header_v1_0 *hdr;
2251 adev->firmware.gpu_info_fw = NULL;
2253 if (adev->mman.discovery_bin)
2256 switch (adev->asic_type) {
2260 chip_name = "vega10";
2263 chip_name = "vega12";
2266 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2267 chip_name = "raven2";
2268 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
2269 chip_name = "picasso";
2271 chip_name = "raven";
2274 chip_name = "arcturus";
2277 chip_name = "navi12";
2281 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
2282 err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
2285 "Failed to get gpu_info firmware \"%s\"\n",
2290 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2291 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2293 switch (hdr->version_major) {
2296 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2297 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2298 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2301 * Should be dropped when DAL no longer needs it.
2303 if (adev->asic_type == CHIP_NAVI12)
2304 goto parse_soc_bounding_box;
2306 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2307 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2308 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2309 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2310 adev->gfx.config.max_texture_channel_caches =
2311 le32_to_cpu(gpu_info_fw->gc_num_tccs);
2312 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2313 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2314 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2315 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2316 adev->gfx.config.double_offchip_lds_buf =
2317 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2318 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2319 adev->gfx.cu_info.max_waves_per_simd =
2320 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2321 adev->gfx.cu_info.max_scratch_slots_per_cu =
2322 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2323 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2324 if (hdr->version_minor >= 1) {
2325 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2326 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2327 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2328 adev->gfx.config.num_sc_per_sh =
2329 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2330 adev->gfx.config.num_packer_per_sc =
2331 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2334 parse_soc_bounding_box:
2336 * soc bounding box info is not integrated in the discovery table,
2337 * so we always need to parse it from the gpu info firmware when needed.
2339 if (hdr->version_minor == 2) {
2340 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2341 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2342 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2343 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2349 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2358 * amdgpu_device_ip_early_init - run early init for hardware IPs
2360 * @adev: amdgpu_device pointer
2362 * Early initialization pass for hardware IPs. The hardware IPs that make
2363 * up each asic are discovered and each IP's early_init callback is run. This
2364 * is the first stage in initializing the asic.
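 * The overall sequence is early_init -> sw_init -> hw_init -> late_init;
 * the later stages run from amdgpu_device_ip_init() and
 * amdgpu_device_ip_late_init().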
2365 * Returns 0 on success, negative error code on failure.
2367 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2369 struct pci_dev *parent;
2373 amdgpu_device_enable_virtual_display(adev);
2375 if (amdgpu_sriov_vf(adev)) {
2376 r = amdgpu_virt_request_full_gpu(adev, true);
2381 switch (adev->asic_type) {
2382 #ifdef CONFIG_DRM_AMDGPU_SI
2388 adev->family = AMDGPU_FAMILY_SI;
2389 r = si_set_ip_blocks(adev);
2394 #ifdef CONFIG_DRM_AMDGPU_CIK
2400 if (adev->flags & AMD_IS_APU)
2401 adev->family = AMDGPU_FAMILY_KV;
2403 adev->family = AMDGPU_FAMILY_CI;
2405 r = cik_set_ip_blocks(adev);
2413 case CHIP_POLARIS10:
2414 case CHIP_POLARIS11:
2415 case CHIP_POLARIS12:
2419 if (adev->flags & AMD_IS_APU)
2420 adev->family = AMDGPU_FAMILY_CZ;
2422 adev->family = AMDGPU_FAMILY_VI;
2424 r = vi_set_ip_blocks(adev);
2429 r = amdgpu_discovery_set_ip_blocks(adev);
2435 if (amdgpu_has_atpx() &&
2436 (amdgpu_is_atpx_hybrid() ||
2437 amdgpu_has_atpx_dgpu_power_cntl()) &&
2438 ((adev->flags & AMD_IS_APU) == 0) &&
2439 !dev_is_removable(&adev->pdev->dev))
2440 adev->flags |= AMD_IS_PX;
2442 if (!(adev->flags & AMD_IS_APU)) {
2443 parent = pcie_find_root_port(adev->pdev);
2444 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2448 adev->pm.pp_feature = amdgpu_pp_feature_mask;
2449 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2450 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2451 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2452 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2453 if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
2454 adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2457 for (i = 0; i < adev->num_ip_blocks; i++) {
2458 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2459 DRM_WARN("disabled ip block: %d <%s>\n",
2460 i, adev->ip_blocks[i].version->funcs->name);
2461 adev->ip_blocks[i].status.valid = false;
2463 if (adev->ip_blocks[i].version->funcs->early_init) {
2464 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2466 adev->ip_blocks[i].status.valid = false;
2468 DRM_ERROR("early_init of IP block <%s> failed %d\n",
2469 adev->ip_blocks[i].version->funcs->name, r);
2472 adev->ip_blocks[i].status.valid = true;
2475 adev->ip_blocks[i].status.valid = true;
2478 /* get the vbios after the asic_funcs are set up */
2479 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2480 r = amdgpu_device_parse_gpu_info_fw(adev);
2485 if (amdgpu_device_read_bios(adev)) {
2486 if (!amdgpu_get_bios(adev))
2489 r = amdgpu_atombios_init(adev);
2491 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2492 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2497 /* get pf2vf msg info at its earliest time */
2498 if (amdgpu_sriov_vf(adev))
2499 amdgpu_virt_init_data_exchange(adev);
2506 amdgpu_amdkfd_device_probe(adev);
2507 adev->cg_flags &= amdgpu_cg_mask;
2508 adev->pg_flags &= amdgpu_pg_mask;
2513 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2517 for (i = 0; i < adev->num_ip_blocks; i++) {
2518 if (!adev->ip_blocks[i].status.sw)
2520 if (adev->ip_blocks[i].status.hw)
2522 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2523 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2524 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2525 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2527 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2528 adev->ip_blocks[i].version->funcs->name, r);
2531 adev->ip_blocks[i].status.hw = true;
2538 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2542 for (i = 0; i < adev->num_ip_blocks; i++) {
2543 if (!adev->ip_blocks[i].status.sw)
2545 if (adev->ip_blocks[i].status.hw)
2547 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2549 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2550 adev->ip_blocks[i].version->funcs->name, r);
2553 adev->ip_blocks[i].status.hw = true;
2559 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2563 uint32_t smu_version;
2565 if (adev->asic_type >= CHIP_VEGA10) {
2566 for (i = 0; i < adev->num_ip_blocks; i++) {
2567 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2570 if (!adev->ip_blocks[i].status.sw)
2573 /* no need to do the fw loading again if already done */
2574 if (adev->ip_blocks[i].status.hw)
2577 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2578 r = adev->ip_blocks[i].version->funcs->resume(adev);
2580 DRM_ERROR("resume of IP block <%s> failed %d\n",
2581 adev->ip_blocks[i].version->funcs->name, r);
2585 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2587 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2588 adev->ip_blocks[i].version->funcs->name, r);
2593 adev->ip_blocks[i].status.hw = true;
2598 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2599 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2604 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2609 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2610 struct amdgpu_ring *ring = adev->rings[i];
2612 /* No need to set up the GPU scheduler for rings that don't need it */
2613 if (!ring || ring->no_scheduler)
2616 switch (ring->funcs->type) {
2617 case AMDGPU_RING_TYPE_GFX:
2618 timeout = adev->gfx_timeout;
2620 case AMDGPU_RING_TYPE_COMPUTE:
2621 timeout = adev->compute_timeout;
2623 case AMDGPU_RING_TYPE_SDMA:
2624 timeout = adev->sdma_timeout;
2627 timeout = adev->video_timeout;
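/* Create the scheduler with the ring-type specific timeout chosen above;
 * jobs that exceed it trigger the driver's timeout handling.
 */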
2631 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
2632 DRM_SCHED_PRIORITY_COUNT,
2633 ring->num_hw_submission, 0,
2634 timeout, adev->reset_domain->wq,
2635 ring->sched_score, ring->name,
2638 DRM_ERROR("Failed to create scheduler on ring %s.\n",
2642 r = amdgpu_uvd_entity_init(adev, ring);
2644 DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
2648 r = amdgpu_vce_entity_init(adev, ring);
2650 DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
2656 amdgpu_xcp_update_partition_sched_list(adev);
2663 * amdgpu_device_ip_init - run init for hardware IPs
2665 * @adev: amdgpu_device pointer
2667 * Main initialization pass for hardware IPs. The list of all the hardware
2668 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2669 * are run. sw_init initializes the software state associated with each IP
2670 * and hw_init initializes the hardware associated with each IP.
2671 * Returns 0 on success, negative error code on failure.
2673 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2677 r = amdgpu_ras_init(adev);
2681 for (i = 0; i < adev->num_ip_blocks; i++) {
2682 if (!adev->ip_blocks[i].status.valid)
2684 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2686 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2687 adev->ip_blocks[i].version->funcs->name, r);
2690 adev->ip_blocks[i].status.sw = true;
2692 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2693 /* need to do common hw init early so everything is set up for gmc */
2694 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2696 DRM_ERROR("hw_init %d failed %d\n", i, r);
2699 adev->ip_blocks[i].status.hw = true;
2700 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2701 /* need to do gmc hw init early so we can allocate gpu mem */
2702 /* Try to reserve bad pages early */
2703 if (amdgpu_sriov_vf(adev))
2704 amdgpu_virt_exchange_data(adev);
2706 r = amdgpu_device_mem_scratch_init(adev);
2708 DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
2711 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2713 DRM_ERROR("hw_init %d failed %d\n", i, r);
2716 r = amdgpu_device_wb_init(adev);
2718 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2721 adev->ip_blocks[i].status.hw = true;
2723 /* right after GMC hw init, we create CSA */
2724 if (adev->gfx.mcbp) {
2725 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2726 AMDGPU_GEM_DOMAIN_VRAM |
2727 AMDGPU_GEM_DOMAIN_GTT,
2730 DRM_ERROR("allocate CSA failed %d\n", r);
2735 r = amdgpu_seq64_init(adev);
2737 DRM_ERROR("allocate seq64 failed %d\n", r);
2743 if (amdgpu_sriov_vf(adev))
2744 amdgpu_virt_init_data_exchange(adev);
2746 r = amdgpu_ib_pool_init(adev);
2748 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2749 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2753 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2757 r = amdgpu_device_ip_hw_init_phase1(adev);
2761 r = amdgpu_device_fw_loading(adev);
2765 r = amdgpu_device_ip_hw_init_phase2(adev);
2770 * retired pages will be loaded from eeprom and reserved here;
2771 * it should be called after amdgpu_device_ip_hw_init_phase2 since,
2772 * for some ASICs, the RAS EEPROM code relies on the SMU being fully
2773 * functional for I2C communication, which is only true at this point.
2775 * amdgpu_ras_recovery_init may fail, but the upper level only cares
2776 * about failures caused by a bad gpu situation and stops the amdgpu
2777 * init process accordingly. For other failure cases, it will still
2778 * release all the resources and print an error message rather than
2779 * returning a negative value to the upper level.
2781 * Note: theoretically, this should be called before all vram allocations
2782 * to protect retired pages from being abused.
2784 r = amdgpu_ras_recovery_init(adev);
2789 * In the case of XGMI, grab an extra reference on the reset domain for this device
2791 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2792 if (amdgpu_xgmi_add_device(adev) == 0) {
2793 if (!amdgpu_sriov_vf(adev)) {
2794 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2796 if (WARN_ON(!hive)) {
2801 if (!hive->reset_domain ||
2802 !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2804 amdgpu_put_xgmi_hive(hive);
2808 /* Drop the early temporary reset domain we created for device */
2809 amdgpu_reset_put_reset_domain(adev->reset_domain);
2810 adev->reset_domain = hive->reset_domain;
2811 amdgpu_put_xgmi_hive(hive);
2816 r = amdgpu_device_init_schedulers(adev);
2820 if (adev->mman.buffer_funcs_ring->sched.ready)
2821 amdgpu_ttm_set_buffer_funcs_status(adev, true);
2823 /* Don't init kfd if the whole hive needs to be reset during init */
2824 if (!adev->gmc.xgmi.pending_reset) {
2825 kgd2kfd_init_zone_device(adev);
2826 amdgpu_amdkfd_device_init(adev);
2829 amdgpu_fru_get_product_info(adev);
2837 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2839 * @adev: amdgpu_device pointer
2841 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2842 * this function before a GPU reset. If the value is retained after a
2843 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2845 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2847 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2851 * amdgpu_device_check_vram_lost - check if vram is valid
2853 * @adev: amdgpu_device pointer
2855 * Checks the reset magic value written to the gart pointer in VRAM.
2856 * The driver calls this after a GPU reset to see if the contents of
2857 * VRAM have been lost or not.
2858 * Returns true if vram is lost, false if not.
2860 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2862 if (memcmp(adev->gart.ptr, adev->reset_magic,
2863 AMDGPU_RESET_MAGIC_NUM))
2866 if (!amdgpu_in_reset(adev))
2870 * For all ASICs with baco/mode1 reset, the VRAM is
2871 * always assumed to be lost.
2873 switch (amdgpu_asic_reset_method(adev)) {
2874 case AMD_RESET_METHOD_BACO:
2875 case AMD_RESET_METHOD_MODE1:
2883 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2885 * @adev: amdgpu_device pointer
2886 * @state: clockgating state (gate or ungate)
2888 * The list of all the hardware IPs that make up the asic is walked and the
2889 * set_clockgating_state callbacks are run.
2890 * The late init pass enables clockgating for hardware IPs;
2891 * the fini or suspend pass disables clockgating for them.
2892 * Returns 0 on success, negative error code on failure.
2895 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2896 enum amd_clockgating_state state)
2900 if (amdgpu_emu_mode == 1)
2903 for (j = 0; j < adev->num_ip_blocks; j++) {
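/* Gate in the normal IP order and ungate in the reverse order,
 * mirroring the init/fini sequence.
 */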
2904 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2905 if (!adev->ip_blocks[i].status.late_initialized)
2907 /* skip CG for GFX, SDMA on S0ix */
2908 if (adev->in_s0ix &&
2909 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2910 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2912 /* skip CG for VCE/UVD, it's handled specially */
2913 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2914 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2915 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2916 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2917 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2918 /* enable clockgating to save power */
2919 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2922 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2923 adev->ip_blocks[i].version->funcs->name, r);
2932 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2933 enum amd_powergating_state state)
2937 if (amdgpu_emu_mode == 1)
2940 for (j = 0; j < adev->num_ip_blocks; j++) {
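/* As with clockgating, gate in the normal IP order and ungate in
 * the reverse order.
 */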
2941 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2942 if (!adev->ip_blocks[i].status.late_initialized)
2944 /* skip PG for GFX, SDMA on S0ix */
2945 if (adev->in_s0ix &&
2946 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2947 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2949 /* skip PG for VCE/UVD, it's handled specially */
2950 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2951 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2952 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2953 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2954 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2955 /* enable powergating to save power */
2956 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2959 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2960 adev->ip_blocks[i].version->funcs->name, r);
2968 static int amdgpu_device_enable_mgpu_fan_boost(void)
2970 struct amdgpu_gpu_instance *gpu_ins;
2971 struct amdgpu_device *adev;
2974 mutex_lock(&mgpu_info.mutex);
2977 * MGPU fan boost feature should be enabled
2978 * only when there are two or more dGPUs in the system.
2981 if (mgpu_info.num_dgpu < 2)
2984 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2985 gpu_ins = &(mgpu_info.gpu_ins[i]);
2986 adev = gpu_ins->adev;
2987 if (!(adev->flags & AMD_IS_APU) &&
2988 !gpu_ins->mgpu_fan_enabled) {
2989 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2993 gpu_ins->mgpu_fan_enabled = 1;
2998 mutex_unlock(&mgpu_info.mutex);
3004 * amdgpu_device_ip_late_init - run late init for hardware IPs
3006 * @adev: amdgpu_device pointer
3008 * Late initialization pass for hardware IPs. The list of all the hardware
3009 * IPs that make up the asic is walked and the late_init callbacks are run.
3010 * late_init covers any special initialization that an IP requires
3011 * after all of the IPs have been initialized or something that needs to happen
3012 * late in the init process.
3013 * Returns 0 on success, negative error code on failure.
3015 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
3017 struct amdgpu_gpu_instance *gpu_instance;
3020 for (i = 0; i < adev->num_ip_blocks; i++) {
3021 if (!adev->ip_blocks[i].status.hw)
3023 if (adev->ip_blocks[i].version->funcs->late_init) {
3024 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
3026 DRM_ERROR("late_init of IP block <%s> failed %d\n",
3027 adev->ip_blocks[i].version->funcs->name, r);
3031 adev->ip_blocks[i].status.late_initialized = true;
3034 r = amdgpu_ras_late_init(adev);
3036 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
3040 amdgpu_ras_set_error_query_ready(adev, true);
3042 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
3043 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
3045 amdgpu_device_fill_reset_magic(adev);
3047 r = amdgpu_device_enable_mgpu_fan_boost();
3049 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
3051 /* For passthrough configuration on arcturus and aldebaran, enable special SBR handling */
3052 if (amdgpu_passthrough(adev) &&
3053 ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
3054 adev->asic_type == CHIP_ALDEBARAN))
3055 amdgpu_dpm_handle_passthrough_sbr(adev, true);
3057 if (adev->gmc.xgmi.num_physical_nodes > 1) {
3058 mutex_lock(&mgpu_info.mutex);
3061 * Reset device p-state to low as this was booted with high.
3063 * This should be performed only after all devices from the same
3064 * hive get initialized.
3066 * However, the number of devices in a hive is not known in advance;
3067 * it is counted one by one as the devices initialize.
3069 * So, we wait for all XGMI interlinked devices to be initialized.
3070 * This may bring some delays as those devices may come from
3071 * different hives. But that should be OK.
3073 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
3074 for (i = 0; i < mgpu_info.num_gpu; i++) {
3075 gpu_instance = &(mgpu_info.gpu_ins[i]);
3076 if (gpu_instance->adev->flags & AMD_IS_APU)
3079 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
3080 AMDGPU_XGMI_PSTATE_MIN);
3082 DRM_ERROR("pstate setting failed (%d).\n", r);
3088 mutex_unlock(&mgpu_info.mutex);
3095 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
3097 * @adev: amdgpu_device pointer
3099 * For ASICs that need to disable the SMC first
3101 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
3105 if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
3108 for (i = 0; i < adev->num_ip_blocks; i++) {
3109 if (!adev->ip_blocks[i].status.hw)
3111 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3112 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3113 /* XXX handle errors */
3115 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3116 adev->ip_blocks[i].version->funcs->name, r);
3118 adev->ip_blocks[i].status.hw = false;
3124 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
3128 for (i = 0; i < adev->num_ip_blocks; i++) {
3129 if (!adev->ip_blocks[i].version->funcs->early_fini)
3132 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
3134 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
3135 adev->ip_blocks[i].version->funcs->name, r);
3139 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3140 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3142 amdgpu_amdkfd_suspend(adev, false);
3144 /* Workaround for ASICs that need to disable the SMC first */
3145 amdgpu_device_smu_fini_early(adev);
3147 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3148 if (!adev->ip_blocks[i].status.hw)
3151 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3152 /* XXX handle errors */
3154 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3155 adev->ip_blocks[i].version->funcs->name, r);
3158 adev->ip_blocks[i].status.hw = false;
3161 if (amdgpu_sriov_vf(adev)) {
3162 if (amdgpu_virt_release_full_gpu(adev, false))
3163 DRM_ERROR("failed to release exclusive mode on fini\n");
3170 * amdgpu_device_ip_fini - run fini for hardware IPs
3172 * @adev: amdgpu_device pointer
3174 * Main teardown pass for hardware IPs. The list of all the hardware
3175 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
3176 * are run. hw_fini tears down the hardware associated with each IP
3177 * and sw_fini tears down any software state associated with each IP.
3178 * Returns 0 on success, negative error code on failure.
3180 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
3184 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
3185 amdgpu_virt_release_ras_err_handler_data(adev);
3187 if (adev->gmc.xgmi.num_physical_nodes > 1)
3188 amdgpu_xgmi_remove_device(adev);
3190 amdgpu_amdkfd_device_fini_sw(adev);
3192 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3193 if (!adev->ip_blocks[i].status.sw)
3196 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
3197 amdgpu_ucode_free_bo(adev);
3198 amdgpu_free_static_csa(&adev->virt.csa_obj);
3199 amdgpu_device_wb_fini(adev);
3200 amdgpu_device_mem_scratch_fini(adev);
3201 amdgpu_ib_pool_fini(adev);
3202 amdgpu_seq64_fini(adev);
3205 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
3206 /* XXX handle errors */
3208 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
3209 adev->ip_blocks[i].version->funcs->name, r);
3211 adev->ip_blocks[i].status.sw = false;
3212 adev->ip_blocks[i].status.valid = false;
3215 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3216 if (!adev->ip_blocks[i].status.late_initialized)
3218 if (adev->ip_blocks[i].version->funcs->late_fini)
3219 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
3220 adev->ip_blocks[i].status.late_initialized = false;
3223 amdgpu_ras_fini(adev);
3229 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
3231 * @work: work_struct.
3233 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
3235 struct amdgpu_device *adev =
3236 container_of(work, struct amdgpu_device, delayed_init_work.work);
3239 r = amdgpu_ib_ring_tests(adev);
3241 DRM_ERROR("ib ring test failed (%d).\n", r);
3244 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
3246 struct amdgpu_device *adev =
3247 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
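	/* This runs once the gfx-off delay has expired with no new requests
	 * pending; ask the SMU to powergate GFX and record that GFXOFF is
	 * in effect.
	 */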
3249 WARN_ON_ONCE(adev->gfx.gfx_off_state);
3250 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
3252 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
3253 adev->gfx.gfx_off_state = true;
3257 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3259 * @adev: amdgpu_device pointer
3261 * Main suspend function for hardware IPs. The list of all the hardware
3262 * IPs that make up the asic is walked, clockgating is disabled and the
3263 * suspend callbacks are run. suspend puts the hardware and software state
3264 * in each IP into a state suitable for suspend.
3265 * Returns 0 on success, negative error code on failure.
3267 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
3271 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3272 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3275 * Per the PMFW team's suggestion, the driver needs to handle disabling
3276 * the gfxoff and df cstate features for gpu reset (e.g. Mode1Reset)
3277 * scenarios. Add the missing df cstate disablement here.
3279 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
3280 dev_warn(adev->dev, "Failed to disallow df cstate");
3282 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3283 if (!adev->ip_blocks[i].status.valid)
3286 /* displays are handled separately */
3287 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
3290 /* XXX handle errors */
3291 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3292 /* XXX handle errors */
3294 DRM_ERROR("suspend of IP block <%s> failed %d\n",
3295 adev->ip_blocks[i].version->funcs->name, r);
3299 adev->ip_blocks[i].status.hw = false;
3306 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3308 * @adev: amdgpu_device pointer
3310 * Main suspend function for hardware IPs. The list of all the hardware
3311 * IPs that make up the asic is walked, clockgating is disabled and the
3312 * suspend callbacks are run. suspend puts the hardware and software state
3313 * in each IP into a state suitable for suspend.
3314 * Returns 0 on success, negative error code on failure.
3316 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3321 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3323 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3324 if (!adev->ip_blocks[i].status.valid)
3326 /* displays are handled in phase1 */
3327 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3329 /* PSP loses its connection when err_event_athub occurs */
3330 if (amdgpu_ras_intr_triggered() &&
3331 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3332 adev->ip_blocks[i].status.hw = false;
3336 /* skip unnecessary suspend if we have not initialized them yet */
3337 if (adev->gmc.xgmi.pending_reset &&
3338 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3339 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3340 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3341 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3342 adev->ip_blocks[i].status.hw = false;
3346 /* skip suspend of gfx/mes and psp for S0ix
3347 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3348 * like at runtime. PSP is also part of the always-on hardware
3349 * so no need to suspend it.
3351 if (adev->in_s0ix &&
3352 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3353 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3354 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3357 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3358 if (adev->in_s0ix &&
3359 (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3360 IP_VERSION(5, 0, 0)) &&
3361 (adev->ip_blocks[i].version->type ==
3362 AMD_IP_BLOCK_TYPE_SDMA))
3365 /* Once swPSP provides the IMU and RLC FW binaries to TOS during cold-boot,
3366 * these stay in the TMR and are expected to be reused by PSP-TOS, which
3367 * reloads from this location; RLC Autoload is likewise loaded from here
3368 * based on the PMFW -> PSP message during the re-init sequence.
3369 * Therefore, psp suspend & resume should be skipped to avoid destroying
3370 * the TMR and reloading the FWs again for IMU-enabled APU ASICs.
3372 if (amdgpu_in_reset(adev) &&
3373 (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3374 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3377 /* XXX handle errors */
3378 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3379 /* XXX handle errors */
3381 DRM_ERROR("suspend of IP block <%s> failed %d\n",
3382 adev->ip_blocks[i].version->funcs->name, r);
3384 adev->ip_blocks[i].status.hw = false;
3385 /* handle putting the SMC in the appropriate state */
3386 if (!amdgpu_sriov_vf(adev)) {
3387 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3388 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3390 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3391 adev->mp1_state, r);
3402 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3404 * @adev: amdgpu_device pointer
3406 * Main suspend function for hardware IPs. The list of all the hardware
3407 * IPs that make up the asic is walked, clockgating is disabled and the
3408 * suspend callbacks are run. suspend puts the hardware and software state
3409 * in each IP into a state suitable for suspend.
3410 * Returns 0 on success, negative error code on failure.
3412 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3416 if (amdgpu_sriov_vf(adev)) {
3417 amdgpu_virt_fini_data_exchange(adev);
3418 amdgpu_virt_request_full_gpu(adev, false);
3421 amdgpu_ttm_set_buffer_funcs_status(adev, false);
3423 r = amdgpu_device_ip_suspend_phase1(adev);
3426 r = amdgpu_device_ip_suspend_phase2(adev);
3428 if (amdgpu_sriov_vf(adev))
3429 amdgpu_virt_release_full_gpu(adev, false);
3434 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3438 static enum amd_ip_block_type ip_order[] = {
3439 AMD_IP_BLOCK_TYPE_COMMON,
3440 AMD_IP_BLOCK_TYPE_GMC,
3441 AMD_IP_BLOCK_TYPE_PSP,
3442 AMD_IP_BLOCK_TYPE_IH,
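	/* After an SR-IOV reset, COMMON, GMC, PSP and IH come back first so
	 * that register access, memory and interrupts are available for the
	 * remaining blocks.
	 */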
3445 for (i = 0; i < adev->num_ip_blocks; i++) {
3447 struct amdgpu_ip_block *block;
3449 block = &adev->ip_blocks[i];
3450 block->status.hw = false;
3452 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3454 if (block->version->type != ip_order[j] ||
3455 !block->status.valid)
3458 r = block->version->funcs->hw_init(adev);
3459 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3462 block->status.hw = true;
3469 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3473 static enum amd_ip_block_type ip_order[] = {
3474 AMD_IP_BLOCK_TYPE_SMC,
3475 AMD_IP_BLOCK_TYPE_DCE,
3476 AMD_IP_BLOCK_TYPE_GFX,
3477 AMD_IP_BLOCK_TYPE_SDMA,
3478 AMD_IP_BLOCK_TYPE_MES,
3479 AMD_IP_BLOCK_TYPE_UVD,
3480 AMD_IP_BLOCK_TYPE_VCE,
3481 AMD_IP_BLOCK_TYPE_VCN,
3482 AMD_IP_BLOCK_TYPE_JPEG
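	/* The remaining blocks are re-initialized in this fixed order; SMC is
	 * resumed rather than re-initialized (see below).
	 */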
3485 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3487 struct amdgpu_ip_block *block;
3489 for (j = 0; j < adev->num_ip_blocks; j++) {
3490 block = &adev->ip_blocks[j];
3492 if (block->version->type != ip_order[i] ||
3493 !block->status.valid ||
3497 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3498 r = block->version->funcs->resume(adev);
3500 r = block->version->funcs->hw_init(adev);
3502 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3505 block->status.hw = true;
3513 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3515 * @adev: amdgpu_device pointer
3517 * First resume function for hardware IPs. The list of all the hardware
3518 * IPs that make up the asic is walked and the resume callbacks are run for
3519 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3520 * after a suspend and updates the software state as necessary. This
3521 * function is also used for restoring the GPU after a GPU reset.
3522 * Returns 0 on success, negative error code on failure.
3524 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3528 for (i = 0; i < adev->num_ip_blocks; i++) {
3529 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3531 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3532 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3533 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3534 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3536 r = adev->ip_blocks[i].version->funcs->resume(adev);
3538 DRM_ERROR("resume of IP block <%s> failed %d\n",
3539 adev->ip_blocks[i].version->funcs->name, r);
3542 adev->ip_blocks[i].status.hw = true;
3550 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3552 * @adev: amdgpu_device pointer
3554 * Second resume function for hardware IPs. The list of all the hardware
3555 * IPs that make up the asic is walked and the resume callbacks are run for
3556 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
3557 * functional state after a suspend and updates the software state as
3558 * necessary. This function is also used for restoring the GPU after a GPU reset.
3560 * Returns 0 on success, negative error code on failure.
3562 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3566 for (i = 0; i < adev->num_ip_blocks; i++) {
3567 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3569 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3570 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3571 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3572 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3574 r = adev->ip_blocks[i].version->funcs->resume(adev);
3576 DRM_ERROR("resume of IP block <%s> failed %d\n",
3577 adev->ip_blocks[i].version->funcs->name, r);
3580 adev->ip_blocks[i].status.hw = true;
3587 * amdgpu_device_ip_resume - run resume for hardware IPs
3589 * @adev: amdgpu_device pointer
3591 * Main resume function for hardware IPs. The hardware IPs
3592 * are split into two resume functions because they are
3593 * also used in recovering from a GPU reset and some additional
3594 * steps need to be taken between them. In this case (S3/S4) they are run sequentially.
3596 * Returns 0 on success, negative error code on failure.
3598 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3602 r = amdgpu_device_ip_resume_phase1(adev);
3606 r = amdgpu_device_fw_loading(adev);
3610 r = amdgpu_device_ip_resume_phase2(adev);
3612 if (adev->mman.buffer_funcs_ring->sched.ready)
3613 amdgpu_ttm_set_buffer_funcs_status(adev, true);
3619 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3621 * @adev: amdgpu_device pointer
3623 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3625 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3627 if (amdgpu_sriov_vf(adev)) {
3628 if (adev->is_atom_fw) {
3629 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3630 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3632 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3633 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3636 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3637 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3642 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3644 * @asic_type: AMD asic type
3646 * Check if there is DC (new modesetting infrastructure) support for an asic.
3647 * returns true if DC has support, false if not.
3649 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3651 switch (asic_type) {
3652 #ifdef CONFIG_DRM_AMDGPU_SI
3656 /* chips with no display hardware */
3658 #if defined(CONFIG_DRM_AMD_DC)
3664 * We have systems in the wild with these ASICs that require
3665 * LVDS and VGA support which is not supported with DC.
3667 * Fall back to the non-DC driver here by default so as not to
3668 * cause regressions.
3670 #if defined(CONFIG_DRM_AMD_DC_SI)
3671 return amdgpu_dc > 0;
3680 * We have systems in the wild with these ASICs that require
3681 * VGA support which is not supported with DC.
3683 * Fall back to the non-DC driver here by default so as not to
3684 * cause regressions.
3686 return amdgpu_dc > 0;
3688 return amdgpu_dc != 0;
3692 DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
3699 * amdgpu_device_has_dc_support - check if dc is supported
3701 * @adev: amdgpu_device pointer
3703 * Returns true for supported, false for not supported
3705 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3707 if (adev->enable_virtual_display ||
3708 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3711 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3714 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3716 struct amdgpu_device *adev =
3717 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3718 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3720 /* It's a bug to not have a hive within this function */
3725 * Use task barrier to synchronize all xgmi reset works across the
3726 * hive. task_barrier_enter and task_barrier_exit will block
3727 * until all the threads running the xgmi reset works reach
3728 * those points. task_barrier_full will do both blocks.
3730 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3732 task_barrier_enter(&hive->tb);
3733 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3735 if (adev->asic_reset_res)
3738 task_barrier_exit(&hive->tb);
3739 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3741 if (adev->asic_reset_res)
3744 amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
3747 task_barrier_full(&hive->tb);
3748 adev->asic_reset_res = amdgpu_asic_reset(adev);
3752 if (adev->asic_reset_res)
3753 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3754 adev->asic_reset_res, adev_to_drm(adev)->unique);
3755 amdgpu_put_xgmi_hive(hive);
3758 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3760 char *input = amdgpu_lockup_timeout;
3761 char *timeout_setting = NULL;
3767 * By default the timeout for non-compute jobs is 10000 ms
3768 * and 60000 ms for compute jobs.
3769 * In SR-IOV or passthrough mode, the timeout for compute
3770 * jobs is 60000 ms by default.
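 * For example, amdgpu.lockup_timeout=10000,60000,10000,10000 sets the
 * gfx, compute, sdma and video timeouts (in ms) in that order, while a
 * single value such as amdgpu.lockup_timeout=10000 applies to all
 * non-compute jobs.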
3772 adev->gfx_timeout = msecs_to_jiffies(10000);
3773 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3774 if (amdgpu_sriov_vf(adev))
3775 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3776 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3778 adev->compute_timeout = msecs_to_jiffies(60000);
3780 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3781 while ((timeout_setting = strsep(&input, ",")) &&
3782 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3783 ret = kstrtol(timeout_setting, 0, &timeout);
3790 } else if (timeout < 0) {
3791 timeout = MAX_SCHEDULE_TIMEOUT;
3792 dev_warn(adev->dev, "lockup timeout disabled");
3793 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3795 timeout = msecs_to_jiffies(timeout);
3800 adev->gfx_timeout = timeout;
3803 adev->compute_timeout = timeout;
3806 adev->sdma_timeout = timeout;
3809 adev->video_timeout = timeout;
3816 * There is only one value specified and
3817 * it should apply to all non-compute jobs.
3820 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3821 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3822 adev->compute_timeout = adev->gfx_timeout;
3830 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3832 * @adev: amdgpu_device pointer
3834 * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3836 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3838 struct iommu_domain *domain;
3840 domain = iommu_get_domain_for_dev(adev->dev);
3841 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3842 adev->ram_is_direct_mapped = true;
3845 static const struct attribute *amdgpu_dev_attributes[] = {
3846 &dev_attr_pcie_replay_count.attr,
3850 static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3852 if (amdgpu_mcbp == 1)
3853 adev->gfx.mcbp = true;
3854 else if (amdgpu_mcbp == 0)
3855 adev->gfx.mcbp = false;
3857 if (amdgpu_sriov_vf(adev))
3858 adev->gfx.mcbp = true;
3861 DRM_INFO("MCBP is enabled\n");
3865 * amdgpu_device_init - initialize the driver
3867 * @adev: amdgpu_device pointer
3868 * @flags: driver flags
3870 * Initializes the driver info and hw (all asics).
3871 * Returns 0 for success or an error on failure.
3872 * Called at driver startup.
3874 int amdgpu_device_init(struct amdgpu_device *adev,
3877 struct drm_device *ddev = adev_to_drm(adev);
3878 struct pci_dev *pdev = adev->pdev;
3884 adev->shutdown = false;
3885 adev->flags = flags;
3887 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3888 adev->asic_type = amdgpu_force_asic_type;
3890 adev->asic_type = flags & AMD_ASIC_MASK;
3892 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3893 if (amdgpu_emu_mode == 1)
3894 adev->usec_timeout *= 10;
3895 adev->gmc.gart_size = 512 * 1024 * 1024;
3896 adev->accel_working = false;
3897 adev->num_rings = 0;
3898 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3899 adev->mman.buffer_funcs = NULL;
3900 adev->mman.buffer_funcs_ring = NULL;
3901 adev->vm_manager.vm_pte_funcs = NULL;
3902 adev->vm_manager.vm_pte_num_scheds = 0;
3903 adev->gmc.gmc_funcs = NULL;
3904 adev->harvest_ip_mask = 0x0;
3905 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3906 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
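	/* Point all register access callbacks at the invalid helpers for now;
	 * each IP's init code installs the real accessors later, so a stray
	 * access before then is caught loudly rather than silently succeeding.
	 */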
3908 adev->smc_rreg = &amdgpu_invalid_rreg;
3909 adev->smc_wreg = &amdgpu_invalid_wreg;
3910 adev->pcie_rreg = &amdgpu_invalid_rreg;
3911 adev->pcie_wreg = &amdgpu_invalid_wreg;
3912 adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
3913 adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
3914 adev->pciep_rreg = &amdgpu_invalid_rreg;
3915 adev->pciep_wreg = &amdgpu_invalid_wreg;
3916 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3917 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3918 adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
3919 adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
3920 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3921 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3922 adev->didt_rreg = &amdgpu_invalid_rreg;
3923 adev->didt_wreg = &amdgpu_invalid_wreg;
3924 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3925 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3926 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3927 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3929 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3930 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3931 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3933 /* mutex initializations are all done here so we
3934 * can recall a function without having locking issues
3936 mutex_init(&adev->firmware.mutex);
3937 mutex_init(&adev->pm.mutex);
3938 mutex_init(&adev->gfx.gpu_clock_mutex);
3939 mutex_init(&adev->srbm_mutex);
3940 mutex_init(&adev->gfx.pipe_reserve_mutex);
3941 mutex_init(&adev->gfx.gfx_off_mutex);
3942 mutex_init(&adev->gfx.partition_mutex);
3943 mutex_init(&adev->grbm_idx_mutex);
3944 mutex_init(&adev->mn_lock);
3945 mutex_init(&adev->virt.vf_errors.lock);
3946 hash_init(adev->mn_hash);
3947 mutex_init(&adev->psp.mutex);
3948 mutex_init(&adev->notifier_lock);
3949 mutex_init(&adev->pm.stable_pstate_ctx_lock);
3950 mutex_init(&adev->benchmark_mutex);
3952 amdgpu_device_init_apu_flags(adev);
3954 r = amdgpu_device_check_arguments(adev);
3958 spin_lock_init(&adev->mmio_idx_lock);
3959 spin_lock_init(&adev->smc_idx_lock);
3960 spin_lock_init(&adev->pcie_idx_lock);
3961 spin_lock_init(&adev->uvd_ctx_idx_lock);
3962 spin_lock_init(&adev->didt_idx_lock);
3963 spin_lock_init(&adev->gc_cac_idx_lock);
3964 spin_lock_init(&adev->se_cac_idx_lock);
3965 spin_lock_init(&adev->audio_endpt_idx_lock);
3966 spin_lock_init(&adev->mm_stats.lock);
3968 INIT_LIST_HEAD(&adev->shadow_list);
3969 mutex_init(&adev->shadow_list_lock);
3971 INIT_LIST_HEAD(&adev->reset_list);
3973 INIT_LIST_HEAD(&adev->ras_list);
3975 INIT_LIST_HEAD(&adev->pm.od_kobj_list);
3977 INIT_DELAYED_WORK(&adev->delayed_init_work,
3978 amdgpu_device_delayed_init_work_handler);
3979 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3980 amdgpu_device_delay_enable_gfx_off);
3982 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3984 adev->gfx.gfx_off_req_count = 1;
3985 adev->gfx.gfx_off_residency = 0;
3986 adev->gfx.gfx_off_entrycount = 0;
3987 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3989 atomic_set(&adev->throttling_logging_enabled, 1);
3991 * If throttling continues, logging will be performed every minute
3992 * to avoid log flooding. "-1" is subtracted since the thermal
3993 * throttling interrupt comes every second. Thus, the total logging
3994 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3995 * for throttling interrupt) = 60 seconds.
3997 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3998 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
4000 /* Register mappings */
4001 /* TODO: block userspace mapping of io register */
4002 if (adev->asic_type >= CHIP_BONAIRE) {
4003 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
4004 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
4006 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
4007 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
4010 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
4011 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
4013 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
4017 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
4018 DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
4021 * Reset domain needs to be present early, before the XGMI hive (if any)
4022 * is discovered and initialized, in order to use the reset sem and
4023 * in_gpu reset flag early on during init and before calling RREG32.
4025 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
4026 if (!adev->reset_domain)
4029 /* detect hw virtualization here */
4030 amdgpu_detect_virtualization(adev);
4032 amdgpu_device_get_pcie_info(adev);
4034 r = amdgpu_device_get_job_timeout_settings(adev);
4036 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
4040 /* early init functions */
4041 r = amdgpu_device_ip_early_init(adev);
4045 amdgpu_device_set_mcbp(adev);
4047 /* Get rid of things like offb */
4048 r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
4052 /* Enable TMZ based on IP_VERSION */
4053 amdgpu_gmc_tmz_set(adev);
4055 amdgpu_gmc_noretry_set(adev);
4056 /* Need to get xgmi info early to decide the reset behavior */
4057 if (adev->gmc.xgmi.supported) {
4058 r = adev->gfxhub.funcs->get_xgmi_info(adev);
4063 /* enable PCIE atomic ops */
4064 if (amdgpu_sriov_vf(adev)) {
4065 if (adev->virt.fw_reserve.p_pf2vf)
4066 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
4067 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
4068 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4069 /* APUs with gfx9 onwards don't rely on PCIe atomics; rather, an
4070 * internal path natively supports atomics, so set have_atomics_support to true.
4072 } else if ((adev->flags & AMD_IS_APU) &&
4073 (amdgpu_ip_version(adev, GC_HWIP, 0) >
4074 IP_VERSION(9, 0, 0))) {
4075 adev->have_atomics_support = true;
4077 adev->have_atomics_support =
4078 !pci_enable_atomic_ops_to_root(adev->pdev,
4079 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
4080 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4083 if (!adev->have_atomics_support)
4084 dev_info(adev->dev, "PCIE atomic ops is not supported\n");
4086 /* doorbell bar mapping and doorbell index init */
4087 amdgpu_doorbell_init(adev);
4089 if (amdgpu_emu_mode == 1) {
4090 /* post the asic in emulation mode */
4091 emu_soc_asic_init(adev);
4092 goto fence_driver_init;
4095 amdgpu_reset_init(adev);
4097 /* detect if we have an SR-IOV vbios */
4099 amdgpu_device_detect_sriov_bios(adev);
4101 /* check if we need to reset the asic
4102 * E.g., driver was not cleanly unloaded previously, etc.
4104 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
4105 if (adev->gmc.xgmi.num_physical_nodes) {
4106 dev_info(adev->dev, "Pending hive reset.\n");
4107 adev->gmc.xgmi.pending_reset = true;
4108 /* Only need to init the blocks necessary for the SMU to handle the reset */
4109 for (i = 0; i < adev->num_ip_blocks; i++) {
4110 if (!adev->ip_blocks[i].status.valid)
4112 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
4113 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
4114 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
4115 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
4116 DRM_DEBUG("IP %s disabled for hw_init.\n",
4117 adev->ip_blocks[i].version->funcs->name);
4118 adev->ip_blocks[i].status.hw = true;
4122 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
4123 case IP_VERSION(13, 0, 0):
4124 case IP_VERSION(13, 0, 7):
4125 case IP_VERSION(13, 0, 10):
4126 r = psp_gpu_reset(adev);
4129 tmp = amdgpu_reset_method;
4130 /* It should do a default reset when loading or reloading the driver,
4131 * regardless of the module parameter reset_method.
4133 amdgpu_reset_method = AMD_RESET_METHOD_NONE;
4134 r = amdgpu_asic_reset(adev);
4135 amdgpu_reset_method = tmp;
4140 dev_err(adev->dev, "asic reset on init failed\n");
4146 /* Post card if necessary */
4147 if (amdgpu_device_need_post(adev)) {
4149 dev_err(adev->dev, "no vBIOS found\n");
4153 DRM_INFO("GPU posting now...\n");
4154 r = amdgpu_device_asic_init(adev);
4156 dev_err(adev->dev, "gpu post error!\n");
4162 if (adev->is_atom_fw) {
4163 /* Initialize clocks */
4164 r = amdgpu_atomfirmware_get_clock_info(adev);
4166 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
4167 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4171 /* Initialize clocks */
4172 r = amdgpu_atombios_get_clock_info(adev);
4174 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
4175 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4178 /* init i2c buses */
4179 if (!amdgpu_device_has_dc_support(adev))
4180 amdgpu_atombios_i2c_init(adev);
4186 r = amdgpu_fence_driver_sw_init(adev);
4188 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
4189 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
4193 /* init the mode config */
4194 drm_mode_config_init(adev_to_drm(adev));
4196 r = amdgpu_device_ip_init(adev);
4198 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
4199 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
4200 goto release_ras_con;
4203 amdgpu_fence_driver_hw_init(adev);
4206 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
4207 adev->gfx.config.max_shader_engines,
4208 adev->gfx.config.max_sh_per_se,
4209 adev->gfx.config.max_cu_per_sh,
4210 adev->gfx.cu_info.number);
4212 adev->accel_working = true;
4214 amdgpu_vm_check_compute_bug(adev);
4216 /* Initialize the buffer migration limit. */
4217 if (amdgpu_moverate >= 0)
4218 max_MBps = amdgpu_moverate;
4220 max_MBps = 8; /* Allow 8 MB/s. */
4221 /* Get a log2 for easy divisions. */
4222 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
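	/* e.g. max_MBps = 8 gives log2_max_MBps = 3, so the migration-rate
	 * math can use shifts instead of divisions.
	 */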
4225 * Register the gpu instance before amdgpu_device_enable_mgpu_fan_boost.
4226 * Otherwise the mgpu fan boost feature will be skipped because the
4227 * gpu instance count would be too low.
4229 amdgpu_register_gpu_instance(adev);
4231 /* enable clockgating, etc. after ib tests, etc. since some blocks require
4232 * explicit gating rather than handling it automatically.
4234 if (!adev->gmc.xgmi.pending_reset) {
4235 r = amdgpu_device_ip_late_init(adev);
4237 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4238 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4239 goto release_ras_con;
4242 amdgpu_ras_resume(adev);
4243 queue_delayed_work(system_wq, &adev->delayed_init_work,
4244 msecs_to_jiffies(AMDGPU_RESUME_MS));
4247 if (amdgpu_sriov_vf(adev)) {
4248 amdgpu_virt_release_full_gpu(adev, true);
4249 flush_delayed_work(&adev->delayed_init_work);
4253 * Place the sysfs registration after `late_init`, as some of the
4254 * operations performed in `late_init` might affect the creation of
4255 * the sysfs interfaces.
4257 r = amdgpu_atombios_sysfs_init(adev);
4259 drm_err(&adev->ddev,
4260 "registering atombios sysfs failed (%d).\n", r);
4262 r = amdgpu_pm_sysfs_init(adev);
4264 DRM_ERROR("registering pm sysfs failed (%d).\n", r);
4266 r = amdgpu_ucode_sysfs_init(adev);
4268 adev->ucode_sysfs_en = false;
4269 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
4271 adev->ucode_sysfs_en = true;
4273 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
4275 dev_err(adev->dev, "Could not create amdgpu device attr\n");
4277 r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
4280 "Could not create amdgpu board attributes\n");
4282 amdgpu_fru_sysfs_init(adev);
4283 amdgpu_reg_state_sysfs_init(adev);
4285 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4286 r = amdgpu_pmu_init(adev);
4288 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
4290 /* Keep the stored pci config space at hand for restore on a sudden PCI error */
4291 if (amdgpu_device_cache_pci_state(adev->pdev))
4292 pci_restore_state(pdev);
4294 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
4295 /* this will fail for cards that aren't VGA class devices, just ignore it */
4298 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4299 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4301 px = amdgpu_device_supports_px(ddev);
4303 if (px || (!dev_is_removable(&adev->pdev->dev) &&
4304 apple_gmux_detect(NULL, NULL)))
4305 vga_switcheroo_register_client(adev->pdev,
4306 &amdgpu_switcheroo_ops, px);
4309 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4311 if (adev->gmc.xgmi.pending_reset)
4312 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
4313 msecs_to_jiffies(AMDGPU_RESUME_MS));
4315 amdgpu_device_check_iommu_direct_map(adev);
4320 if (amdgpu_sriov_vf(adev))
4321 amdgpu_virt_release_full_gpu(adev, true);
4323 /* failed in exclusive mode due to timeout */
4324 if (amdgpu_sriov_vf(adev) &&
4325 !amdgpu_sriov_runtime(adev) &&
4326 amdgpu_virt_mmio_blocked(adev) &&
4327 !amdgpu_virt_wait_reset(adev)) {
4328 dev_err(adev->dev, "VF exclusive mode timeout\n");
4329 /* Don't send request since VF is inactive. */
4330 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4331 adev->virt.ops = NULL;
4334 amdgpu_release_ras_context(adev);
4337 amdgpu_vf_error_trans_all(adev);
4342 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4345 /* Clear all CPU mappings pointing to this device */
4346 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4348 /* Unmap all mapped bars - Doorbell, registers and VRAM */
4349 amdgpu_doorbell_fini(adev);
4351 iounmap(adev->rmmio);
4353 if (adev->mman.aper_base_kaddr)
4354 iounmap(adev->mman.aper_base_kaddr);
4355 adev->mman.aper_base_kaddr = NULL;
4357 /* Memory manager related */
4358 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4359 arch_phys_wc_del(adev->gmc.vram_mtrr);
4360 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4365 * amdgpu_device_fini_hw - tear down the driver
4367 * @adev: amdgpu_device pointer
4369 * Tear down the driver info (all asics).
4370 * Called at driver shutdown.
4372 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4374 dev_info(adev->dev, "amdgpu: finishing device.\n");
4375 flush_delayed_work(&adev->delayed_init_work);
4376 adev->shutdown = true;
4378 /* make sure the IB tests have finished before entering exclusive mode,
4379 * to avoid preemption on the IB tests
4381 if (amdgpu_sriov_vf(adev)) {
4382 amdgpu_virt_request_full_gpu(adev, false);
4383 amdgpu_virt_fini_data_exchange(adev);
4386 /* disable all interrupts */
4387 amdgpu_irq_disable_all(adev);
4388 if (adev->mode_info.mode_config_initialized) {
4389 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4390 drm_helper_force_disable_all(adev_to_drm(adev));
4392 drm_atomic_helper_shutdown(adev_to_drm(adev));
4394 amdgpu_fence_driver_hw_fini(adev);
4396 if (adev->mman.initialized)
4397 drain_workqueue(adev->mman.bdev.wq);
4399 if (adev->pm.sysfs_initialized)
4400 amdgpu_pm_sysfs_fini(adev);
4401 if (adev->ucode_sysfs_en)
4402 amdgpu_ucode_sysfs_fini(adev);
4403 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4404 amdgpu_fru_sysfs_fini(adev);
4406 amdgpu_reg_state_sysfs_fini(adev);
4408 /* RAS features must be disabled before hw fini */
4409 amdgpu_ras_pre_fini(adev);
4411 amdgpu_ttm_set_buffer_funcs_status(adev, false);
4413 amdgpu_device_ip_fini_early(adev);
4415 amdgpu_irq_fini_hw(adev);
4417 if (adev->mman.initialized)
4418 ttm_device_clear_dma_mappings(&adev->mman.bdev);
4420 amdgpu_gart_dummy_page_fini(adev);
4422 if (drm_dev_is_unplugged(adev_to_drm(adev)))
4423 amdgpu_device_unmap_mmio(adev);
4427 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4432 amdgpu_fence_driver_sw_fini(adev);
4433 amdgpu_device_ip_fini(adev);
4434 amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4435 adev->accel_working = false;
4436 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4438 amdgpu_reset_fini(adev);
4440 /* free i2c buses */
4441 if (!amdgpu_device_has_dc_support(adev))
4442 amdgpu_i2c_fini(adev);
4444 if (amdgpu_emu_mode != 1)
4445 amdgpu_atombios_fini(adev);
4450 kfree(adev->fru_info);
4451 adev->fru_info = NULL;
4453 px = amdgpu_device_supports_px(adev_to_drm(adev));
4455 if (px || (!dev_is_removable(&adev->pdev->dev) &&
4456 apple_gmux_detect(NULL, NULL)))
4457 vga_switcheroo_unregister_client(adev->pdev);
4460 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4462 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4463 vga_client_unregister(adev->pdev);
4465 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4467 iounmap(adev->rmmio);
4469 amdgpu_doorbell_fini(adev);
4473 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4474 amdgpu_pmu_fini(adev);
4475 if (adev->mman.discovery_bin)
4476 amdgpu_discovery_fini(adev);
4478 amdgpu_reset_put_reset_domain(adev->reset_domain);
4479 adev->reset_domain = NULL;
4481 kfree(adev->pci_state);
4486 * amdgpu_device_evict_resources - evict device resources
4487 * @adev: amdgpu device object
4489 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4490 * of the vram memory type. Mainly used for evicting device resources
4491 * at suspend time.
4494 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4498 /* No need to evict vram on APUs for suspend to ram or s2idle */
4499 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4502 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4504 DRM_WARN("evicting device resources failed\n");
4512 * amdgpu_device_prepare - prepare for device suspend
4514 * @dev: drm dev pointer
4516 * Prepare to put the hw in the suspend state (all asics).
4517 * Returns 0 for success or an error on failure.
4518 * Called at driver suspend.
4520 int amdgpu_device_prepare(struct drm_device *dev)
4522 struct amdgpu_device *adev = drm_to_adev(dev);
4525 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4528 /* Evict the majority of BOs before starting suspend sequence */
4529 r = amdgpu_device_evict_resources(adev);
4533 for (i = 0; i < adev->num_ip_blocks; i++) {
4534 if (!adev->ip_blocks[i].status.valid)
4536 if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
4538 r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
4547 * amdgpu_device_suspend - initiate device suspend
4549 * @dev: drm dev pointer
4550 * @fbcon: notify the fbdev of suspend
4552 * Puts the hw in the suspend state (all asics).
4553 * Returns 0 for success or an error on failure.
4554 * Called at driver suspend.
4556 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4558 struct amdgpu_device *adev = drm_to_adev(dev);
4561 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4564 adev->in_suspend = true;
4566 if (amdgpu_sriov_vf(adev)) {
4567 amdgpu_virt_fini_data_exchange(adev);
4568 r = amdgpu_virt_request_full_gpu(adev, false);
4573 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4574 DRM_WARN("smart shift update failed\n");
4577 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4579 cancel_delayed_work_sync(&adev->delayed_init_work);
4580 flush_delayed_work(&adev->gfx.gfx_off_delay_work);
4582 amdgpu_ras_suspend(adev);
4584 amdgpu_device_ip_suspend_phase1(adev);
4587 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4589 r = amdgpu_device_evict_resources(adev);
4593 amdgpu_ttm_set_buffer_funcs_status(adev, false);
4595 amdgpu_fence_driver_hw_fini(adev);
4597 amdgpu_device_ip_suspend_phase2(adev);
4599 if (amdgpu_sriov_vf(adev))
4600 amdgpu_virt_release_full_gpu(adev, false);
4602 r = amdgpu_dpm_notify_rlc_state(adev, false);
4610 * amdgpu_device_resume - initiate device resume
4612 * @dev: drm dev pointer
4613 * @fbcon: notify the fbdev of resume
4615 * Bring the hw back to operating state (all asics).
4616 * Returns 0 for success or an error on failure.
4617 * Called at driver resume.
4619 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4621 struct amdgpu_device *adev = drm_to_adev(dev);
4624 if (amdgpu_sriov_vf(adev)) {
4625 r = amdgpu_virt_request_full_gpu(adev, true);
4630 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4634 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4637 if (amdgpu_device_need_post(adev)) {
4638 r = amdgpu_device_asic_init(adev);
4640 dev_err(adev->dev, "amdgpu asic init failed\n");
4643 r = amdgpu_device_ip_resume(adev);
4646 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4649 amdgpu_fence_driver_hw_init(adev);
4651 if (!adev->in_s0ix) {
4652 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4657 r = amdgpu_device_ip_late_init(adev);
4661 queue_delayed_work(system_wq, &adev->delayed_init_work,
4662 msecs_to_jiffies(AMDGPU_RESUME_MS));
4664 if (amdgpu_sriov_vf(adev)) {
4665 amdgpu_virt_init_data_exchange(adev);
4666 amdgpu_virt_release_full_gpu(adev, true);
4672 /* Make sure IB tests flushed */
4673 flush_delayed_work(&adev->delayed_init_work);
4676 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4678 amdgpu_ras_resume(adev);
4680 if (adev->mode_info.num_crtc) {
4682 * Most of the connector probing functions try to acquire runtime pm
4683 * refs to ensure that the GPU is powered on when connector polling is
4684 * performed. Since we're calling this from a runtime PM callback,
4685 * trying to acquire rpm refs will cause us to deadlock.
4687 * Since we're guaranteed to be holding the rpm lock, it's safe to
4688 * temporarily disable the rpm helpers so this doesn't deadlock us.
4691 dev->dev->power.disable_depth++;
4693 if (!adev->dc_enabled)
4694 drm_helper_hpd_irq_event(dev);
4696 drm_kms_helper_hotplug_event(dev);
4698 dev->dev->power.disable_depth--;
4701 adev->in_suspend = false;
4703 if (adev->enable_mes)
4704 amdgpu_mes_self_test(adev);
4706 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4707 DRM_WARN("smart shift update failed\n");
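
/*
 * A minimal sketch (hypothetical; the real dev_pm_ops live elsewhere in the
 * driver) of how the suspend/resume entry points above are typically invoked
 * from system sleep callbacks. The example_pmops_* names are illustrative,
 * not part of the driver.
 */
static int example_pmops_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_suspend(drm_dev, true);
}

static int example_pmops_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_resume(drm_dev, true);
}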
4713 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4715 * @adev: amdgpu_device pointer
4717 * The list of all the hardware IPs that make up the asic is walked and
4718 * the check_soft_reset callbacks are run. check_soft_reset determines
4719 * if the asic is still hung or not.
4720 * Returns true if any of the IPs are still in a hung state, false if not.
4722 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4725 bool asic_hang = false;
4727 if (amdgpu_sriov_vf(adev))
4730 if (amdgpu_asic_need_full_reset(adev))
4733 for (i = 0; i < adev->num_ip_blocks; i++) {
4734 if (!adev->ip_blocks[i].status.valid)
4736 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4737 adev->ip_blocks[i].status.hang =
4738 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4739 if (adev->ip_blocks[i].status.hang) {
4740 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4748 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4750 * @adev: amdgpu_device pointer
4752 * The list of all the hardware IPs that make up the asic is walked and the
4753 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4754 * handles any IP specific hardware or software state changes that are
4755 * necessary for a soft reset to succeed.
4756 * Returns 0 on success, negative error code on failure.
4758 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4762 for (i = 0; i < adev->num_ip_blocks; i++) {
4763 if (!adev->ip_blocks[i].status.valid)
4765 if (adev->ip_blocks[i].status.hang &&
4766 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4767 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4777 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4779 * @adev: amdgpu_device pointer
4781 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4782 * reset is necessary to recover.
4783 * Returns true if a full asic reset is required, false if not.
4785 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4789 if (amdgpu_asic_need_full_reset(adev))
4792 for (i = 0; i < adev->num_ip_blocks; i++) {
4793 if (!adev->ip_blocks[i].status.valid)
4795 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4796 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4797 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4798 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4799 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4800 if (adev->ip_blocks[i].status.hang) {
4801 dev_info(adev->dev, "Some blocks need a full reset!\n");
4810 * amdgpu_device_ip_soft_reset - do a soft reset
4812 * @adev: amdgpu_device pointer
4814 * The list of all the hardware IPs that make up the asic is walked and the
4815 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4816 * IP specific hardware or software state changes that are necessary to soft
4817 * reset the IP.
4818 * Returns 0 on success, negative error code on failure.
4820 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4824 for (i = 0; i < adev->num_ip_blocks; i++) {
4825 if (!adev->ip_blocks[i].status.valid)
4827 if (adev->ip_blocks[i].status.hang &&
4828 adev->ip_blocks[i].version->funcs->soft_reset) {
4829 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4839 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4841 * @adev: amdgpu_device pointer
4843 * The list of all the hardware IPs that make up the asic is walked and the
4844 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4845 * handles any IP specific hardware or software state changes that are
4846 * necessary after the IP has been soft reset.
4847 * Returns 0 on success, negative error code on failure.
4849 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4853 for (i = 0; i < adev->num_ip_blocks; i++) {
4854 if (!adev->ip_blocks[i].status.valid)
4856 if (adev->ip_blocks[i].status.hang &&
4857 adev->ip_blocks[i].version->funcs->post_soft_reset)
4858 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4867 * amdgpu_device_recover_vram - Recover some VRAM contents
4869 * @adev: amdgpu_device pointer
4871 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4872 * restore things like GPUVM page tables after a GPU reset where
4873 * the contents of VRAM might be lost.
4875 * Returns:
4876 * 0 on success, negative error code on failure.
4878 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4880 struct dma_fence *fence = NULL, *next = NULL;
4881 struct amdgpu_bo *shadow;
4882 struct amdgpu_bo_vm *vmbo;
4885 if (amdgpu_sriov_runtime(adev))
4886 tmo = msecs_to_jiffies(8000);
4888 tmo = msecs_to_jiffies(100);
4890 dev_info(adev->dev, "recover vram bo from shadow start\n");
4891 mutex_lock(&adev->shadow_list_lock);
4892 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4893 /* If vm is compute context or adev is APU, shadow will be NULL */
4896 shadow = vmbo->shadow;
4898 /* No need to recover an evicted BO */
4899 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4900 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4901 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4904 r = amdgpu_bo_restore_shadow(shadow, &next);
4909 tmo = dma_fence_wait_timeout(fence, false, tmo);
4910 dma_fence_put(fence);
4915 } else if (tmo < 0) {
4923 mutex_unlock(&adev->shadow_list_lock);
4926 tmo = dma_fence_wait_timeout(fence, false, tmo);
4927 dma_fence_put(fence);
4929 if (r < 0 || tmo <= 0) {
4930 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4934 dev_info(adev->dev, "recover vram bo from shadow done\n");
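
/*
 * A minimal sketch of the timeout-carrying wait pattern used above
 * (hypothetical helper): dma_fence_wait_timeout() returns the remaining
 * jiffies, so feeding the result back in lets a whole chain of fences share
 * one time budget.
 */
static long example_wait_fence_chain(struct dma_fence **fences, int count,
				     long tmo)
{
	int i;

	for (i = 0; i < count && tmo > 0; ++i)
		tmo = dma_fence_wait_timeout(fences[i], false, tmo);

	return tmo; /* <= 0 means the budget ran out or an error occurred */
}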
4940 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4942 * @adev: amdgpu_device pointer
4943 * @from_hypervisor: request from hypervisor
4945 * Do a VF FLR and reinitialize the ASIC.
4946 * Returns 0 on success, negative error code on failure.
4948 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4949 bool from_hypervisor)
4952 struct amdgpu_hive_info *hive = NULL;
4953 int retry_limit = 0;
4956 amdgpu_amdkfd_pre_reset(adev);
4958 if (from_hypervisor)
4959 r = amdgpu_virt_request_full_gpu(adev, true);
4961 r = amdgpu_virt_reset_gpu(adev);
4964 amdgpu_irq_gpu_reset_resume_helper(adev);
4966 /* some SW cleanup the VF needs to do before recovery */
4967 amdgpu_virt_post_reset(adev);
4969 /* Resume IP prior to SMC */
4970 r = amdgpu_device_ip_reinit_early_sriov(adev);
4974 amdgpu_virt_init_data_exchange(adev);
4976 r = amdgpu_device_fw_loading(adev);
4980 /* now we are okay to resume SMC/CP/SDMA */
4981 r = amdgpu_device_ip_reinit_late_sriov(adev);
4985 hive = amdgpu_get_xgmi_hive(adev);
4986 /* Update PSP FW topology after reset */
4987 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4988 r = amdgpu_xgmi_update_topology(hive, adev);
4991 amdgpu_put_xgmi_hive(hive);
4994 r = amdgpu_ib_ring_tests(adev);
4996 amdgpu_amdkfd_post_reset(adev);
5000 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
5001 amdgpu_inc_vram_lost(adev);
5002 r = amdgpu_device_recover_vram(adev);
5004 amdgpu_virt_release_full_gpu(adev, true);
5006 if (AMDGPU_RETRY_SRIOV_RESET(r)) {
5007 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
5011 DRM_ERROR("GPU reset retry is beyond the retry limit\n");
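
/*
 * A sketch of the retry policy above in standalone form (assumption: the
 * elided lines increment retry_limit and jump back with "goto retry"). Only
 * transient errors matched by AMDGPU_RETRY_SRIOV_RESET() are retried, up to
 * AMDGPU_MAX_RETRY_LIMIT attempts.
 */
static int example_retry_sriov_reset(struct amdgpu_device *adev)
{
	int retry = 0;
	int r = amdgpu_device_reset_sriov(adev, true);

	while (r && AMDGPU_RETRY_SRIOV_RESET(r) &&
	       retry++ < AMDGPU_MAX_RETRY_LIMIT)
		r = amdgpu_device_reset_sriov(adev, true);

	return r;
}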
5018 * amdgpu_device_has_job_running - check if there is any job in the pending list
5020 * @adev: amdgpu_device pointer
5022 * check if there is any job in the pending list
5024 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
5027 struct drm_sched_job *job;
5029 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5030 struct amdgpu_ring *ring = adev->rings[i];
5032 if (!ring || !drm_sched_wqueue_ready(&ring->sched))
5035 spin_lock(&ring->sched.job_list_lock);
5036 job = list_first_entry_or_null(&ring->sched.pending_list,
5037 struct drm_sched_job, list);
5038 spin_unlock(&ring->sched.job_list_lock);
5046 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
5048 * @adev: amdgpu_device pointer
5050 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
5051 * the asic.
5053 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
5056 if (amdgpu_gpu_recovery == 0)
5059 /* Skip soft reset check in fatal error mode */
5060 if (!amdgpu_ras_is_poison_mode_supported(adev))
5063 if (amdgpu_sriov_vf(adev))
5066 if (amdgpu_gpu_recovery == -1) {
5067 switch (adev->asic_type) {
5068 #ifdef CONFIG_DRM_AMDGPU_SI
5075 #ifdef CONFIG_DRM_AMDGPU_CIK
5082 case CHIP_CYAN_SKILLFISH:
5092 dev_info(adev->dev, "GPU recovery disabled.\n");
5096 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
5101 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
5103 dev_info(adev->dev, "GPU mode1 reset\n");
5106 pci_clear_master(adev->pdev);
5108 amdgpu_device_cache_pci_state(adev->pdev);
5110 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
5111 dev_info(adev->dev, "GPU smu mode1 reset\n");
5112 ret = amdgpu_dpm_mode1_reset(adev);
5114 dev_info(adev->dev, "GPU psp mode1 reset\n");
5115 ret = psp_gpu_reset(adev);
5119 goto mode1_reset_failed;
5121 amdgpu_device_load_pci_state(adev->pdev);
5122 ret = amdgpu_psp_wait_for_bootloader(adev);
5124 goto mode1_reset_failed;
5126 /* wait for asic to come out of reset */
5127 for (i = 0; i < adev->usec_timeout; i++) {
5128 u32 memsize = adev->nbio.funcs->get_memsize(adev);
5130 if (memsize != 0xffffffff)
5135 if (i >= adev->usec_timeout) {
5137 goto mode1_reset_failed;
5140 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
5145 dev_err(adev->dev, "GPU mode1 reset failed\n");
5149 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
5150 struct amdgpu_reset_context *reset_context)
5153 struct amdgpu_job *job = NULL;
5154 bool need_full_reset =
5155 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5157 if (reset_context->reset_req_dev == adev)
5158 job = reset_context->job;
5160 if (amdgpu_sriov_vf(adev)) {
5161 /* stop the data exchange thread */
5162 amdgpu_virt_fini_data_exchange(adev);
5165 amdgpu_fence_driver_isr_toggle(adev, true);
5167 /* block all schedulers and reset given job's ring */
5168 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5169 struct amdgpu_ring *ring = adev->rings[i];
5171 if (!ring || !drm_sched_wqueue_ready(&ring->sched))
5174 /* Clear job fence from fence drv to avoid force_completion
5175 * leaving NULL and the vm flush fence in the fence drv
5177 amdgpu_fence_driver_clear_job_fences(ring);
5179 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
5180 amdgpu_fence_driver_force_completion(ring);
5183 amdgpu_fence_driver_isr_toggle(adev, false);
5186 drm_sched_increase_karma(&job->base);
5188 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
5189 /* If reset handler not implemented, continue; otherwise return */
5190 if (r == -EOPNOTSUPP)
5195 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
5196 if (!amdgpu_sriov_vf(adev)) {
5198 if (!need_full_reset)
5199 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
5201 if (!need_full_reset && amdgpu_gpu_recovery &&
5202 amdgpu_device_ip_check_soft_reset(adev)) {
5203 amdgpu_device_ip_pre_soft_reset(adev);
5204 r = amdgpu_device_ip_soft_reset(adev);
5205 amdgpu_device_ip_post_soft_reset(adev);
5206 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5207 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
5208 need_full_reset = true;
5212 if (need_full_reset)
5213 r = amdgpu_device_ip_suspend(adev);
5214 if (need_full_reset)
5215 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5217 clear_bit(AMDGPU_NEED_FULL_RESET,
5218 &reset_context->flags);
5224 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
5228 lockdep_assert_held(&adev->reset_domain->sem);
5230 for (i = 0; i < adev->reset_info.num_regs; i++) {
5231 adev->reset_info.reset_dump_reg_value[i] =
5232 RREG32(adev->reset_info.reset_dump_reg_list[i]);
5234 trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i],
5235 adev->reset_info.reset_dump_reg_value[i]);
5241 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5242 struct amdgpu_reset_context *reset_context)
5244 struct amdgpu_device *tmp_adev = NULL;
5245 bool need_full_reset, skip_hw_reset, vram_lost = false;
5247 bool gpu_reset_for_dev_remove = 0;
5249 /* Try reset handler method first */
5250 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5252 amdgpu_reset_reg_dumps(tmp_adev);
5254 reset_context->reset_device_list = device_list_handle;
5255 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
5256 /* If reset handler not implemented, continue; otherwise return */
5257 if (r == -EOPNOTSUPP)
5262 /* Reset handler not implemented, use the default method */
5264 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5265 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
5267 gpu_reset_for_dev_remove =
5268 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5269 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5272 * ASIC reset has to be done on all XGMI hive nodes ASAP
5273 * to allow proper link negotiation in FW (within 1 sec)
5275 if (!skip_hw_reset && need_full_reset) {
5276 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5277 /* For XGMI run all resets in parallel to speed up the process */
5278 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5279 tmp_adev->gmc.xgmi.pending_reset = false;
5280 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
5283 r = amdgpu_asic_reset(tmp_adev);
5286 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
5287 r, adev_to_drm(tmp_adev)->unique);
5292 /* For XGMI wait for all resets to complete before proceed */
5294 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5295 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5296 flush_work(&tmp_adev->xgmi_reset_work);
5297 r = tmp_adev->asic_reset_res;
5305 if (!r && amdgpu_ras_intr_triggered()) {
5306 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5307 amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB);
5310 amdgpu_ras_intr_cleared();
5313 /* Since the mode1 reset affects base ip blocks, the
5314 * phase1 ip blocks need to be resumed. Otherwise there
5315 * will be a BIOS signature error and the psp bootloader
5316 * can't load kdb on the next amdgpu install.
5318 if (gpu_reset_for_dev_remove) {
5319 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
5320 amdgpu_device_ip_resume_phase1(tmp_adev);
5325 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5326 if (need_full_reset) {
5328 r = amdgpu_device_asic_init(tmp_adev);
5330 dev_warn(tmp_adev->dev, "asic atom init failed!");
5332 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5334 r = amdgpu_device_ip_resume_phase1(tmp_adev);
5338 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5340 amdgpu_coredump(tmp_adev, vram_lost, reset_context);
5343 DRM_INFO("VRAM is lost due to GPU reset!\n");
5344 amdgpu_inc_vram_lost(tmp_adev);
5347 r = amdgpu_device_fw_loading(tmp_adev);
5351 r = amdgpu_xcp_restore_partition_mode(
5356 r = amdgpu_device_ip_resume_phase2(tmp_adev);
5360 if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
5361 amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
5364 amdgpu_device_fill_reset_magic(tmp_adev);
5367 * Add this ASIC as tracked, as the reset has already
5368 * completed successfully.
5370 amdgpu_register_gpu_instance(tmp_adev);
5372 if (!reset_context->hive &&
5373 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5374 amdgpu_xgmi_add_device(tmp_adev);
5376 r = amdgpu_device_ip_late_init(tmp_adev);
5380 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
5383 * The GPU enters a bad state once the number of
5384 * faulty pages detected by ECC reaches the threshold,
5385 * and RAS recovery is scheduled next. So add a check
5386 * here to break out of recovery if the bad page
5387 * threshold has indeed been exceeded, and remind the
5388 * user to either retire this GPU or set a bigger
5389 * bad_page_threshold so the fix sticks the next time
5390 * the driver is probed.
5392 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
5394 amdgpu_ras_resume(tmp_adev);
5400 /* Update PSP FW topology after reset */
5401 if (reset_context->hive &&
5402 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5403 r = amdgpu_xgmi_update_topology(
5404 reset_context->hive, tmp_adev);
5410 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5411 r = amdgpu_ib_ring_tests(tmp_adev);
5413 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5414 need_full_reset = true;
5421 r = amdgpu_device_recover_vram(tmp_adev);
5423 tmp_adev->asic_reset_res = r;
5427 if (need_full_reset)
5428 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5430 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5434 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5437 switch (amdgpu_asic_reset_method(adev)) {
5438 case AMD_RESET_METHOD_MODE1:
5439 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5441 case AMD_RESET_METHOD_MODE2:
5442 adev->mp1_state = PP_MP1_STATE_RESET;
5445 adev->mp1_state = PP_MP1_STATE_NONE;
5450 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5452 amdgpu_vf_error_trans_all(adev);
5453 adev->mp1_state = PP_MP1_STATE_NONE;
5456 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5458 struct pci_dev *p = NULL;
5460 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5461 adev->pdev->bus->number, 1);
5463 pm_runtime_enable(&(p->dev));
5464 pm_runtime_resume(&(p->dev));
5470 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5472 enum amd_reset_method reset_method;
5473 struct pci_dev *p = NULL;
5477 * For now, only BACO and mode1 reset are confirmed
5478 * to suffer from the audio issue when the audio device
5479 * is not properly suspended.
5480 reset_method = amdgpu_asic_reset_method(adev);
5481 if ((reset_method != AMD_RESET_METHOD_BACO) &&
5482 (reset_method != AMD_RESET_METHOD_MODE1))
5485 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5486 adev->pdev->bus->number, 1);
5490 expires = pm_runtime_autosuspend_expiration(&(p->dev));
5493 * If we cannot get the audio device's autosuspend delay,
5494 * a fixed 4s interval is used. Since 3s is the audio
5495 * controller's default autosuspend delay setting, the
5496 * 4s used here is guaranteed to cover it.
5498 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5500 while (!pm_runtime_status_suspended(&(p->dev))) {
5501 if (!pm_runtime_suspend(&(p->dev)))
5504 if (expires < ktime_get_mono_fast_ns()) {
5505 dev_warn(adev->dev, "failed to suspend display audio\n");
5507 /* TODO: abort the succeeding gpu reset? */
5512 pm_runtime_disable(&(p->dev));
5518 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5520 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5522 #if defined(CONFIG_DEBUG_FS)
5523 if (!amdgpu_sriov_vf(adev))
5524 cancel_work(&adev->reset_work);
5528 cancel_work(&adev->kfd.reset_work);
5530 if (amdgpu_sriov_vf(adev))
5531 cancel_work(&adev->virt.flr_work);
5533 if (con && adev->ras_enabled)
5534 cancel_work(&con->recovery_work);
5539 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5541 * @adev: amdgpu_device pointer
5542 * @job: the job that triggered the hang
5543 * @reset_context: amdgpu reset context pointer
5545 * Attempt to reset the GPU if it has hung (all asics).
5546 * Attempt to do a soft reset or full reset and reinitialize the ASIC.
5547 * Returns 0 for success or an error on failure.
5550 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5551 struct amdgpu_job *job,
5552 struct amdgpu_reset_context *reset_context)
5554 struct list_head device_list, *device_list_handle = NULL;
5555 bool job_signaled = false;
5556 struct amdgpu_hive_info *hive = NULL;
5557 struct amdgpu_device *tmp_adev = NULL;
5559 bool need_emergency_restart = false;
5560 bool audio_suspended = false;
5561 bool gpu_reset_for_dev_remove = false;
5563 gpu_reset_for_dev_remove =
5564 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5565 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5568 * Special case: RAS triggered and full reset isn't supported
5570 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5573 * Flush RAM to disk so that after reboot
5574 * the user can read the log and see why the system rebooted.
5576 if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
5577 amdgpu_ras_get_context(adev)->reboot) {
5578 DRM_WARN("Emergency reboot.");
5581 emergency_restart();
5584 dev_info(adev->dev, "GPU %s begin!\n",
5585 need_emergency_restart ? "jobs stop":"reset");
5587 if (!amdgpu_sriov_vf(adev))
5588 hive = amdgpu_get_xgmi_hive(adev);
5590 mutex_lock(&hive->hive_lock);
5592 reset_context->job = job;
5593 reset_context->hive = hive;
5595 * Build list of devices to reset.
5596 * In case we are in XGMI hive mode, resort the device list
5597 * to put adev in the 1st position.
5599 INIT_LIST_HEAD(&device_list);
5600 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5601 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5602 list_add_tail(&tmp_adev->reset_list, &device_list);
5603 if (gpu_reset_for_dev_remove && adev->shutdown)
5604 tmp_adev->shutdown = true;
5606 if (!list_is_first(&adev->reset_list, &device_list))
5607 list_rotate_to_front(&adev->reset_list, &device_list);
5608 device_list_handle = &device_list;
5610 list_add_tail(&adev->reset_list, &device_list);
5611 device_list_handle = &device_list;
5614 /* We need to lock reset domain only once both for XGMI and single device */
5615 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5617 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5619 /* block all schedulers and reset given job's ring */
5620 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5622 amdgpu_device_set_mp1_state(tmp_adev);
5625 * Try to put the audio codec into suspend state
5626 * before gpu reset started.
5628 * The power domain of the graphics device is
5629 * shared with the AZ power domain. Without this,
5630 * we may change the audio hardware from behind
5631 * the audio driver's back, which will trigger
5632 * some audio codec errors.
5634 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5635 audio_suspended = true;
5637 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5639 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5641 if (!amdgpu_sriov_vf(tmp_adev))
5642 amdgpu_amdkfd_pre_reset(tmp_adev);
5645 * Mark these ASICs to be reset as untracked first,
5646 * and add them back after the reset completes
5648 amdgpu_unregister_gpu_instance(tmp_adev);
5650 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5652 /* disable ras on ALL IPs */
5653 if (!need_emergency_restart &&
5654 amdgpu_device_ip_need_full_reset(tmp_adev))
5655 amdgpu_ras_suspend(tmp_adev);
5657 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5658 struct amdgpu_ring *ring = tmp_adev->rings[i];
5660 if (!ring || !drm_sched_wqueue_ready(&ring->sched))
5663 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5665 if (need_emergency_restart)
5666 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5668 atomic_inc(&tmp_adev->gpu_reset_counter);
5671 if (need_emergency_restart)
5672 goto skip_sched_resume;
5675 * Must check guilty signal here since after this point all old
5676 * HW fences are force signaled.
5678 * job->base holds a reference to parent fence
5680 if (job && dma_fence_is_signaled(&job->hw_fence)) {
5681 job_signaled = true;
5682 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset\n");
5686 retry: /* Rest of adevs pre asic reset from XGMI hive. */
5687 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5688 if (gpu_reset_for_dev_remove) {
5689 /* Workaround for ASICs that need to disable SMC first */
5690 amdgpu_device_smu_fini_early(tmp_adev);
5692 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5693 /* TODO: should we stop? */
5695 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5696 r, adev_to_drm(tmp_adev)->unique);
5697 tmp_adev->asic_reset_res = r;
5701 * Drop all pending non scheduler resets. Scheduler resets
5702 * were already dropped during drm_sched_stop
5704 amdgpu_device_stop_pending_resets(tmp_adev);
5707 /* Actual ASIC resets if needed.*/
5708 /* Host driver will handle XGMI hive reset for SRIOV */
5709 if (amdgpu_sriov_vf(adev)) {
5710 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5712 adev->asic_reset_res = r;
5714 /* Aldebaran and gfx_11_0_3 support RAS in SRIOV, so RAS needs to be resumed during reset */
5715 if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
5716 IP_VERSION(9, 4, 2) ||
5717 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
5718 amdgpu_ras_resume(adev);
5720 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5721 if (r == -EAGAIN)
5724 if (!r && gpu_reset_for_dev_remove)
5730 /* Post ASIC reset for all devs. */
5731 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5733 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5734 struct amdgpu_ring *ring = tmp_adev->rings[i];
5736 if (!ring || !drm_sched_wqueue_ready(&ring->sched))
5739 drm_sched_start(&ring->sched, true);
5742 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
5743 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5745 if (tmp_adev->asic_reset_res)
5746 r = tmp_adev->asic_reset_res;
5748 tmp_adev->asic_reset_res = 0;
5751 /* bad news, how do we tell userspace? */
5752 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5753 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5755 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5756 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5757 DRM_WARN("smart shift update failed\n");
5762 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5763 /* unlock kfd: SRIOV would do it separately */
5764 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5765 amdgpu_amdkfd_post_reset(tmp_adev);
5767 /* kfd_post_reset will do nothing if the kfd device is not initialized;
5768 * bring up kfd here if it wasn't initialized before
5770 if (!adev->kfd.init_complete)
5771 amdgpu_amdkfd_device_init(adev);
5773 if (audio_suspended)
5774 amdgpu_device_resume_display_audio(tmp_adev);
5776 amdgpu_device_unset_mp1_state(tmp_adev);
5778 amdgpu_ras_set_error_query_ready(tmp_adev, true);
5782 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5784 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5787 mutex_unlock(&hive->hive_lock);
5788 amdgpu_put_xgmi_hive(hive);
5792 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5794 atomic_set(&adev->reset_domain->reset_res, r);
5799 * amdgpu_device_partner_bandwidth - find the bandwidth of the appropriate partner
5801 * @adev: amdgpu_device pointer
5802 * @speed: pointer to the speed of the link
5803 * @width: pointer to the width of the link
5805 * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
5806 * first physical partner to an AMD dGPU.
5807 * This will exclude any virtual switches and links.
5809 static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
5810 enum pci_bus_speed *speed,
5811 enum pcie_link_width *width)
5813 struct pci_dev *parent = adev->pdev;
5815 if (!speed || !width)
5818 *speed = PCI_SPEED_UNKNOWN;
5819 *width = PCIE_LNK_WIDTH_UNKNOWN;
5821 while ((parent = pci_upstream_bridge(parent))) {
5822 /* skip upstream/downstream switches internal to the dGPU */
5823 if (parent->vendor == PCI_VENDOR_ID_ATI)
5825 *speed = pcie_get_speed_cap(parent);
5826 *width = pcie_get_width_cap(parent);
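
/*
 * Usage sketch (hypothetical helper, not part of the driver): report the
 * link capabilities of the first physical partner, as discovered by the
 * bridge walk above.
 */
static void example_log_partner_caps(struct amdgpu_device *adev)
{
	enum pci_bus_speed speed;
	enum pcie_link_width width;

	amdgpu_device_partner_bandwidth(adev, &speed, &width);
	dev_info(adev->dev, "partner link: speed enum %d, width enum %d\n",
		 speed, width);
}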
5832 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5834 * @adev: amdgpu_device pointer
5836 * Fetches and stores in the driver the PCIE capabilities (gen speed
5837 * and lanes) of the slot the device is in. Handles APUs and
5838 * virtualized environments where PCIE config space may not be available.
5840 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5842 struct pci_dev *pdev;
5843 enum pci_bus_speed speed_cap, platform_speed_cap;
5844 enum pcie_link_width platform_link_width;
5846 if (amdgpu_pcie_gen_cap)
5847 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5849 if (amdgpu_pcie_lane_cap)
5850 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5852 /* covers APUs as well */
5853 if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
5854 if (adev->pm.pcie_gen_mask == 0)
5855 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5856 if (adev->pm.pcie_mlw_mask == 0)
5857 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5861 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5864 amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
5865 &platform_link_width);
5867 if (adev->pm.pcie_gen_mask == 0) {
5870 speed_cap = pcie_get_speed_cap(pdev);
5871 if (speed_cap == PCI_SPEED_UNKNOWN) {
5872 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5873 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5874 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5876 if (speed_cap == PCIE_SPEED_32_0GT)
5877 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5878 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5879 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5880 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5881 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5882 else if (speed_cap == PCIE_SPEED_16_0GT)
5883 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5884 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5885 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5886 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5887 else if (speed_cap == PCIE_SPEED_8_0GT)
5888 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5889 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5890 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5891 else if (speed_cap == PCIE_SPEED_5_0GT)
5892 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5893 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5895 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5898 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5899 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5900 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5902 if (platform_speed_cap == PCIE_SPEED_32_0GT)
5903 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5904 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5905 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5906 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5907 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5908 else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5909 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5910 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5911 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5912 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5913 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5914 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5915 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5916 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5917 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5918 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5919 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5921 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5925 if (adev->pm.pcie_mlw_mask == 0) {
5926 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5927 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5929 switch (platform_link_width) {
5931 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5932 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5933 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5934 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5935 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5936 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5937 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5940 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5941 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5942 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5943 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5944 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5945 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5948 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5949 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5950 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5951 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5952 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5955 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5956 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5957 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5958 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5961 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5962 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5963 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5966 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5967 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5970 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
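
/*
 * Usage sketch (hypothetical helper): once the masks above are populated,
 * later code can test individual capability bits, e.g. whether both the
 * ASIC and the platform support a given PCIe gen.
 */
static bool example_supports_pcie_gen4(struct amdgpu_device *adev)
{
	return (adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4) &&
	       (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
}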
5980 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5982 * @adev: amdgpu_device pointer
5983 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5985 * Return true if @peer_adev can access (DMA) @adev through the PCIe
5986 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5987 * @peer_adev.
5989 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5990 struct amdgpu_device *peer_adev)
5992 #ifdef CONFIG_HSA_AMD_P2P
5993 uint64_t address_mask = peer_adev->dev->dma_mask ?
5994 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5995 resource_size_t aper_limit =
5996 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5998 !adev->gmc.xgmi.connected_to_cpu &&
5999 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
6001 return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
6002 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
6003 !(adev->gmc.aper_base & address_mask ||
6004 aper_limit & address_mask));
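
/*
 * A standalone sketch of the address check above (hypothetical helper): a
 * BAR is only peer-addressable if both its base and its last byte fall
 * within the peer's DMA mask.
 */
static bool example_bar_within_dma_mask(u64 bar_base, u64 bar_size,
					u64 dma_mask)
{
	u64 address_mask = ~dma_mask;
	u64 aper_limit = bar_base + bar_size - 1;

	return !(bar_base & address_mask) && !(aper_limit & address_mask);
}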
6010 int amdgpu_device_baco_enter(struct drm_device *dev)
6012 struct amdgpu_device *adev = drm_to_adev(dev);
6013 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6015 if (!amdgpu_device_supports_baco(dev))
6018 if (ras && adev->ras_enabled &&
6019 adev->nbio.funcs->enable_doorbell_interrupt)
6020 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
6022 return amdgpu_dpm_baco_enter(adev);
6025 int amdgpu_device_baco_exit(struct drm_device *dev)
6027 struct amdgpu_device *adev = drm_to_adev(dev);
6028 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6031 if (!amdgpu_device_supports_baco(dev))
6034 ret = amdgpu_dpm_baco_exit(adev);
6038 if (ras && adev->ras_enabled &&
6039 adev->nbio.funcs->enable_doorbell_interrupt)
6040 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
6042 if (amdgpu_passthrough(adev) &&
6043 adev->nbio.funcs->clear_doorbell_interrupt)
6044 adev->nbio.funcs->clear_doorbell_interrupt(adev);
6050 * amdgpu_pci_error_detected - Called when a PCI error is detected.
6051 * @pdev: PCI device struct
6052 * @state: PCI channel state
6054 * Description: Called when a PCI error is detected.
6056 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
6058 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6060 struct drm_device *dev = pci_get_drvdata(pdev);
6061 struct amdgpu_device *adev = drm_to_adev(dev);
6064 DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
6066 if (adev->gmc.xgmi.num_physical_nodes > 1) {
6067 DRM_WARN("No support for XGMI hive yet...");
6068 return PCI_ERS_RESULT_DISCONNECT;
6071 adev->pci_channel_state = state;
6074 case pci_channel_io_normal:
6075 return PCI_ERS_RESULT_CAN_RECOVER;
6076 /* Fatal error, prepare for slot reset */
6077 case pci_channel_io_frozen:
6079 * Locking adev->reset_domain->sem will prevent any external access
6080 * to GPU during PCI error recovery
6082 amdgpu_device_lock_reset_domain(adev->reset_domain);
6083 amdgpu_device_set_mp1_state(adev);
6086 * Block any work scheduling as we do for regular GPU reset
6087 * for the duration of the recovery
6089 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6090 struct amdgpu_ring *ring = adev->rings[i];
6092 if (!ring || !drm_sched_wqueue_ready(&ring->sched))
6095 drm_sched_stop(&ring->sched, NULL);
6097 atomic_inc(&adev->gpu_reset_counter);
6098 return PCI_ERS_RESULT_NEED_RESET;
6099 case pci_channel_io_perm_failure:
6100 /* Permanent error, prepare for device removal */
6101 return PCI_ERS_RESULT_DISCONNECT;
6104 return PCI_ERS_RESULT_NEED_RESET;
6108 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
6109 * @pdev: pointer to PCI device
6111 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
6114 DRM_INFO("PCI error: mmio enabled callback!!\n");
6116 /* TODO - dump whatever for debugging purposes */
6118 /* This is called only if amdgpu_pci_error_detected returns
6119 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
6120 * works, so there is no need to reset the slot.
6123 return PCI_ERS_RESULT_RECOVERED;
6127 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
6128 * @pdev: PCI device struct
6130 * Description: This routine is called by the pci error recovery
6131 * code after the PCI slot has been reset, just before we
6132 * should resume normal operations.
6134 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
6136 struct drm_device *dev = pci_get_drvdata(pdev);
6137 struct amdgpu_device *adev = drm_to_adev(dev);
6139 struct amdgpu_reset_context reset_context;
6141 struct list_head device_list;
6143 DRM_INFO("PCI error: slot reset callback!!\n");
6145 memset(&reset_context, 0, sizeof(reset_context));
6147 INIT_LIST_HEAD(&device_list);
6148 list_add_tail(&adev->reset_list, &device_list);
6150 /* wait for asic to come out of reset */
6153 /* Restore PCI confspace */
6154 amdgpu_device_load_pci_state(pdev);
6156 /* confirm ASIC came out of reset */
6157 for (i = 0; i < adev->usec_timeout; i++) {
6158 memsize = amdgpu_asic_get_config_memsize(adev);
6160 if (memsize != 0xffffffff)
6164 if (memsize == 0xffffffff) {
6169 reset_context.method = AMD_RESET_METHOD_NONE;
6170 reset_context.reset_req_dev = adev;
6171 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
6172 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
6174 adev->no_hw_access = true;
6175 r = amdgpu_device_pre_asic_reset(adev, &reset_context);
6176 adev->no_hw_access = false;
6180 r = amdgpu_do_asic_reset(&device_list, &reset_context);
6184 if (amdgpu_device_cache_pci_state(adev->pdev))
6185 pci_restore_state(adev->pdev);
6187 DRM_INFO("PCIe error recovery succeeded\n");
6189 DRM_ERROR("PCIe error recovery failed, err:%d\n", r);
6190 amdgpu_device_unset_mp1_state(adev);
6191 amdgpu_device_unlock_reset_domain(adev->reset_domain);
6194 return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
6198 * amdgpu_pci_resume() - resume normal ops after PCI reset
6199 * @pdev: pointer to PCI device
6201 * Called when the error recovery driver tells us that it's
6202 * OK to resume normal operation.
6204 void amdgpu_pci_resume(struct pci_dev *pdev)
6206 struct drm_device *dev = pci_get_drvdata(pdev);
6207 struct amdgpu_device *adev = drm_to_adev(dev);
6211 DRM_INFO("PCI error: resume callback!!\n");
6213 /* Only continue execution for the case of pci_channel_io_frozen */
6214 if (adev->pci_channel_state != pci_channel_io_frozen)
6217 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6218 struct amdgpu_ring *ring = adev->rings[i];
6220 if (!ring || !drm_sched_wqueue_ready(&ring->sched))
6223 drm_sched_start(&ring->sched, true);
6226 amdgpu_device_unset_mp1_state(adev);
6227 amdgpu_device_unlock_reset_domain(adev->reset_domain);
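
/*
 * Sketch of how the four AER callbacks above fit together (assumption: in
 * the real driver this table lives in amdgpu_drv.c and is referenced from
 * the pci_driver structure).
 */
static const struct pci_error_handlers example_amdgpu_pci_err_handler = {
	.error_detected	= amdgpu_pci_error_detected,
	.mmio_enabled	= amdgpu_pci_mmio_enabled,
	.slot_reset	= amdgpu_pci_slot_reset,
	.resume		= amdgpu_pci_resume,
};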
6230 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
6232 struct drm_device *dev = pci_get_drvdata(pdev);
6233 struct amdgpu_device *adev = drm_to_adev(dev);
6236 r = pci_save_state(pdev);
6238 kfree(adev->pci_state);
6240 adev->pci_state = pci_store_saved_state(pdev);
6242 if (!adev->pci_state) {
6243 DRM_ERROR("Failed to store PCI saved state");
6247 DRM_WARN("Failed to save PCI state, err:%d\n", r);
6254 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
6256 struct drm_device *dev = pci_get_drvdata(pdev);
6257 struct amdgpu_device *adev = drm_to_adev(dev);
6260 if (!adev->pci_state)
6263 r = pci_load_saved_state(pdev, adev->pci_state);
6266 pci_restore_state(pdev);
6268 DRM_WARN("Failed to load PCI state, err:%d\n", r);
6275 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6276 struct amdgpu_ring *ring)
6278 #ifdef CONFIG_X86_64
6279 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6282 if (adev->gmc.xgmi.connected_to_cpu)
6285 if (ring && ring->funcs->emit_hdp_flush)
6286 amdgpu_ring_emit_hdp_flush(ring);
6288 amdgpu_asic_flush_hdp(adev, ring);
6291 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6292 struct amdgpu_ring *ring)
6294 #ifdef CONFIG_X86_64
6295 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6298 if (adev->gmc.xgmi.connected_to_cpu)
6301 amdgpu_asic_invalidate_hdp(adev, ring);
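
/*
 * Minimal usage sketch (hypothetical helpers): after the CPU writes through
 * the BAR, flush the HDP cache so the GPU sees the data; before the CPU
 * reads data the GPU produced, invalidate the HDP read cache. Passing a
 * NULL ring selects the MMIO path in the helpers above.
 */
static void example_cpu_write_for_gpu(struct amdgpu_device *adev,
				      void __iomem *buf, u32 value)
{
	writel(value, buf);			/* CPU write through the BAR */
	amdgpu_device_flush_hdp(adev, NULL);	/* make it visible to the GPU */
}

static u32 example_cpu_read_from_gpu(struct amdgpu_device *adev,
				     void __iomem *buf)
{
	amdgpu_device_invalidate_hdp(adev, NULL);	/* drop stale HDP data */
	return readl(buf);
}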
6304 int amdgpu_in_reset(struct amdgpu_device *adev)
6306 return atomic_read(&adev->reset_domain->in_gpu_reset);
6310 * amdgpu_device_halt() - bring hardware to some kind of halt state
6312 * @adev: amdgpu_device pointer
6314 * Bring hardware to some kind of halt state so that no one can touch it
6315 * any more. It helps to maintain the error context when an error occurs.
6316 * Compared to a simple hang, the system will stay stable at least for SSH
6317 * access. Then it should be trivial to inspect the hardware state and
6318 * see what's going on. Implemented as follows:
6320 * 1. drm_dev_unplug() makes device inaccessible to user space (IOCTLs, etc),
6321 * clears all CPU mappings to device, disallows remappings through page faults
6322 * 2. amdgpu_irq_disable_all() disables all interrupts
6323 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
6324 * 4. set adev->no_hw_access to avoid potential crashes after step 5
6325 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
6326 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
6327 * flush any in flight DMA operations
6329 void amdgpu_device_halt(struct amdgpu_device *adev)
6331 struct pci_dev *pdev = adev->pdev;
6332 struct drm_device *ddev = adev_to_drm(adev);
6334 amdgpu_xcp_dev_unplug(adev);
6335 drm_dev_unplug(ddev);
6337 amdgpu_irq_disable_all(adev);
6339 amdgpu_fence_driver_hw_fini(adev);
6341 adev->no_hw_access = true;
6343 amdgpu_device_unmap_mmio(adev);
6345 pci_disable_device(pdev);
6346 pci_wait_for_pending_transaction(pdev);
6349 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
6352 unsigned long flags, address, data;
6355 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6356 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6358 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6359 WREG32(address, reg * 4);
6360 (void)RREG32(address);
6362 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6366 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6369 unsigned long flags, address, data;
6371 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6372 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6374 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6375 WREG32(address, reg * 4);
6376 (void)RREG32(address);
6379 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
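
/*
 * Sketch (hypothetical helper, assuming the (adev, reg[, value]) signatures
 * above): a read-modify-write on a PCIe port register built from the two
 * indirect accessors.
 */
static void example_pcie_port_rmw(struct amdgpu_device *adev, u32 reg,
				  u32 mask, u32 value)
{
	u32 tmp = amdgpu_device_pcie_port_rreg(adev, reg);

	tmp = (tmp & ~mask) | (value & mask);
	amdgpu_device_pcie_port_wreg(adev, reg, tmp);
}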
6383 * amdgpu_device_switch_gang - switch to a new gang
6384 * @adev: amdgpu_device pointer
6385 * @gang: the gang to switch to
6387 * Try to switch to a new gang.
6388 * Returns: NULL if we switched to the new gang or a reference to the current
6389 * gang leader.
6391 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6392 struct dma_fence *gang)
6394 struct dma_fence *old = NULL;
6399 old = dma_fence_get_rcu_safe(&adev->gang_submit);
6405 if (!dma_fence_is_signaled(old))
6408 } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
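
/*
 * Usage sketch (hypothetical caller): keep retrying the switch, waiting for
 * the previous gang whenever amdgpu_device_switch_gang() hands back a
 * reference to a still-running gang leader.
 */
static int example_switch_to_gang(struct amdgpu_device *adev,
				  struct dma_fence *gang)
{
	struct dma_fence *old;

	while ((old = amdgpu_device_switch_gang(adev, gang))) {
		long r = dma_fence_wait(old, true);

		dma_fence_put(old);
		if (r < 0)
			return r;
	}

	return 0;
}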
6415 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6417 switch (adev->asic_type) {
6418 #ifdef CONFIG_DRM_AMDGPU_SI
6422 /* chips with no display hardware */
6424 #ifdef CONFIG_DRM_AMDGPU_SI
6430 #ifdef CONFIG_DRM_AMDGPU_CIK
6439 case CHIP_POLARIS10:
6440 case CHIP_POLARIS11:
6441 case CHIP_POLARIS12:
6445 /* chips with display hardware */
6449 if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
6450 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6456 uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
6457 uint32_t inst, uint32_t reg_addr, char reg_name[],
6458 uint32_t expected_value, uint32_t mask)
6462 uint32_t tmp_ = RREG32(reg_addr);
6463 uint32_t loop = adev->usec_timeout;
6465 while ((tmp_ & (mask)) != (expected_value)) {
6467 loop = adev->usec_timeout;
6471 tmp_ = RREG32(reg_addr);
6474 DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
6475 inst, reg_name, (uint32_t)expected_value,
6476 (uint32_t)(tmp_ & (mask)));