2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/devcoredump.h>
36 #include <generated/utsrelease.h>
37 #include <linux/pci-p2pdma.h>
38 #include <linux/apple-gmux.h>
40 #include <drm/drm_aperture.h>
41 #include <drm/drm_atomic_helper.h>
42 #include <drm/drm_crtc_helper.h>
43 #include <drm/drm_fb_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/amdgpu_drm.h>
46 #include <linux/vgaarb.h>
47 #include <linux/vga_switcheroo.h>
48 #include <linux/efi.h>
50 #include "amdgpu_trace.h"
51 #include "amdgpu_i2c.h"
53 #include "amdgpu_atombios.h"
54 #include "amdgpu_atomfirmware.h"
56 #ifdef CONFIG_DRM_AMDGPU_SI
59 #ifdef CONFIG_DRM_AMDGPU_CIK
65 #include "bif/bif_4_1_d.h"
66 #include <linux/firmware.h>
67 #include "amdgpu_vf_error.h"
69 #include "amdgpu_amdkfd.h"
70 #include "amdgpu_pm.h"
72 #include "amdgpu_xgmi.h"
73 #include "amdgpu_ras.h"
74 #include "amdgpu_pmu.h"
75 #include "amdgpu_fru_eeprom.h"
76 #include "amdgpu_reset.h"
78 #include <linux/suspend.h>
79 #include <drm/task_barrier.h>
80 #include <linux/pm_runtime.h>
82 #include <drm/drm_drv.h>
84 #if IS_ENABLED(CONFIG_X86)
85 #include <asm/intel-family.h>
88 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
89 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
90 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
91 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
92 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
93 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
94 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
96 #define AMDGPU_RESUME_MS 2000
97 #define AMDGPU_MAX_RETRY_LIMIT 2
98 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
100 static const struct drm_driver amdgpu_kms_driver;
102 const char *amdgpu_asic_name[] = {
144 * DOC: pcie_replay_count
146 * The amdgpu driver provides a sysfs API for reporting the total number
147 * of PCIe replays (NAKs).
148 * The file pcie_replay_count is used for this and returns the total
149 * number of replays as a sum of the NAKs generated and NAKs received.
152 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
153 struct device_attribute *attr, char *buf)
155 struct drm_device *ddev = dev_get_drvdata(dev);
156 struct amdgpu_device *adev = drm_to_adev(ddev);
157 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
159 return sysfs_emit(buf, "%llu\n", cnt);
162 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
163 amdgpu_device_get_pcie_replay_count, NULL);
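/*
 * Illustrative usage sketch, not part of the driver: the attribute is read
 * from user space; assuming card0 is an amdgpu device, e.g.:
 *
 *   $ cat /sys/class/drm/card0/device/pcie_replay_count
 *   0
 */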
165 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
170 * The amdgpu driver provides a sysfs API for reporting the product name
172 * The file product_name is used for this and returns the product name
173 * as returned from the FRU.
174 * NOTE: This is only available for certain server cards
177 static ssize_t amdgpu_device_get_product_name(struct device *dev,
178 struct device_attribute *attr, char *buf)
180 struct drm_device *ddev = dev_get_drvdata(dev);
181 struct amdgpu_device *adev = drm_to_adev(ddev);
183 return sysfs_emit(buf, "%s\n", adev->product_name);
186 static DEVICE_ATTR(product_name, S_IRUGO,
187 amdgpu_device_get_product_name, NULL);
190 * DOC: product_number
192 * The amdgpu driver provides a sysfs API for reporting the part number
194 * The file product_number is used for this and returns the part number
195 * as returned from the FRU.
196 * NOTE: This is only available for certain server cards
199 static ssize_t amdgpu_device_get_product_number(struct device *dev,
200 struct device_attribute *attr, char *buf)
202 struct drm_device *ddev = dev_get_drvdata(dev);
203 struct amdgpu_device *adev = drm_to_adev(ddev);
205 return sysfs_emit(buf, "%s\n", adev->product_number);
208 static DEVICE_ATTR(product_number, S_IRUGO,
209 amdgpu_device_get_product_number, NULL);
214 * The amdgpu driver provides a sysfs API for reporting the serial number
216 * The file serial_number is used for this and returns the serial number
217 * as returned from the FRU.
218 * NOTE: This is only available for certain server cards
221 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
222 struct device_attribute *attr, char *buf)
224 struct drm_device *ddev = dev_get_drvdata(dev);
225 struct amdgpu_device *adev = drm_to_adev(ddev);
227 return sysfs_emit(buf, "%s\n", adev->serial);
230 static DEVICE_ATTR(serial_number, S_IRUGO,
231 amdgpu_device_get_serial_number, NULL);
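/*
 * Illustrative usage sketch, not part of the driver: on server cards that
 * carry an FRU EEPROM, the attributes above can be read from user space, e.g.:
 *
 *   $ cat /sys/class/drm/card0/device/product_name
 *   $ cat /sys/class/drm/card0/device/product_number
 *   $ cat /sys/class/drm/card0/device/serial_number
 */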
234 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
236 * @dev: drm_device pointer
238 * Returns true if the device is a dGPU with ATPX power control,
239 * otherwise returns false.
241 bool amdgpu_device_supports_px(struct drm_device *dev)
243 struct amdgpu_device *adev = drm_to_adev(dev);
245 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
251 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
253 * @dev: drm_device pointer
255 * Returns true if the device is a dGPU with ACPI power control,
256 * otherwise returns false.
258 bool amdgpu_device_supports_boco(struct drm_device *dev)
260 struct amdgpu_device *adev = drm_to_adev(dev);
263 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
269 * amdgpu_device_supports_baco - Does the device support BACO
271 * @dev: drm_device pointer
273 * Returns true if the device supports BACO,
274 * otherwise returns false.
276 bool amdgpu_device_supports_baco(struct drm_device *dev)
278 struct amdgpu_device *adev = drm_to_adev(dev);
280 return amdgpu_asic_supports_baco(adev);
284 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
285 * Smart Shift support
287 * @dev: drm_device pointer
289 * Returns true if the device is a dGPU with Smart Shift support,
290 * otherwise returns false.
292 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
294 return (amdgpu_device_supports_boco(dev) &&
295 amdgpu_acpi_is_power_shift_control_supported());
299 * VRAM access helper functions
303 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
305 * @adev: amdgpu_device pointer
306 * @pos: offset of the buffer in vram
307 * @buf: virtual address of the buffer in system memory
308 * @size: read/write size, sizeof(@buf) must be >= @size
309 * @write: true - write to vram, otherwise - read from vram
311 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
312 void *buf, size_t size, bool write)
315 uint32_t hi = ~0, tmp = 0;
316 uint32_t *data = buf;
320 if (!drm_dev_enter(adev_to_drm(adev), &idx))
323 BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
325 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
326 for (last = pos + size; pos < last; pos += 4) {
329 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
331 WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
335 WREG32_NO_KIQ(mmMM_DATA, *data++);
337 *data++ = RREG32_NO_KIQ(mmMM_DATA);
340 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
345 * amdgpu_device_aper_access - access vram by vram aperture
347 * @adev: amdgpu_device pointer
348 * @pos: offset of the buffer in vram
349 * @buf: virtual address of the buffer in system memory
350 * @size: read/write size, sizeof(@buf) must be >= @size
351 * @write: true - write to vram, otherwise - read from vram
353 * Returns the number of bytes that have been transferred.
355 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
356 void *buf, size_t size, bool write)
363 if (!adev->mman.aper_base_kaddr)
366 last = min(pos + size, adev->gmc.visible_vram_size);
368 addr = adev->mman.aper_base_kaddr + pos;
372 memcpy_toio(addr, buf, count);
374 amdgpu_device_flush_hdp(adev, NULL);
376 amdgpu_device_invalidate_hdp(adev, NULL);
378 memcpy_fromio(buf, addr, count);
390 * amdgpu_device_vram_access - read/write a buffer in vram
392 * @adev: amdgpu_device pointer
393 * @pos: offset of the buffer in vram
394 * @buf: virtual address of the buffer in system memory
395 * @size: read/write size, sizeof(@buf) must be >= @size
396 * @write: true - write to vram, otherwise - read from vram
398 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
399 void *buf, size_t size, bool write)
403 /* try to use the VRAM aperture to access VRAM first */
404 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
407 /* use MM access for the rest of VRAM */
410 amdgpu_device_mm_access(adev, pos, buf, size, write);
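/*
 * Illustrative sketch, not part of the driver: a hypothetical caller reading
 * a single dword from VRAM through this helper. @pos and @size must be dword
 * aligned, see the BUG_ON() in amdgpu_device_mm_access():
 *
 *   static u32 my_read_vram_dw(struct amdgpu_device *adev, loff_t pos)
 *   {
 *           u32 val;
 *
 *           amdgpu_device_vram_access(adev, pos, &val, sizeof(val), false);
 *           return val;
 *   }
 */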
415 * register access helper functions.
418 /* Check if hw access should be skipped because of hotplug or device error */
419 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
421 if (adev->no_hw_access)
424 #ifdef CONFIG_LOCKDEP
426 * This is a bit complicated to understand, so worth a comment. What we assert
427 * here is that the GPU reset is not running on another thread in parallel.
429 * For this we trylock the read side of the reset semaphore, if that succeeds
430 * we know that the reset is not running in parallel.
432 * If the trylock fails we assert that we are either already holding the read
433 * side of the lock or are the reset thread itself and hold the write side of
437 if (down_read_trylock(&adev->reset_domain->sem))
438 up_read(&adev->reset_domain->sem);
440 lockdep_assert_held(&adev->reset_domain->sem);
447 * amdgpu_device_rreg - read a memory mapped IO or indirect register
449 * @adev: amdgpu_device pointer
450 * @reg: dword aligned register offset
451 * @acc_flags: access flags which require special behavior
453 * Returns the 32 bit value from the offset specified.
455 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
456 uint32_t reg, uint32_t acc_flags)
460 if (amdgpu_device_skip_hw_access(adev))
463 if ((reg * 4) < adev->rmmio_size) {
464 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
465 amdgpu_sriov_runtime(adev) &&
466 down_read_trylock(&adev->reset_domain->sem)) {
467 ret = amdgpu_kiq_rreg(adev, reg);
468 up_read(&adev->reset_domain->sem);
470 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
473 ret = adev->pcie_rreg(adev, reg * 4);
476 trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
482 * MMIO register read with bytes helper functions
483 * @offset: byte offset from MMIO start
488 * amdgpu_mm_rreg8 - read a memory mapped IO register
490 * @adev: amdgpu_device pointer
491 * @offset: byte aligned register offset
493 * Returns the 8 bit value from the offset specified.
495 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
497 if (amdgpu_device_skip_hw_access(adev))
500 if (offset < adev->rmmio_size)
501 return (readb(adev->rmmio + offset));
506 * MMIO register write with bytes helper functions
507 * @offset: byte offset from MMIO start
508 * @value: the value to be written to the register
512 * amdgpu_mm_wreg8 - write a memory mapped IO register
514 * @adev: amdgpu_device pointer
515 * @offset: byte aligned register offset
516 * @value: 8 bit value to write
518 * Writes the value specified to the offset specified.
520 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
522 if (amdgpu_device_skip_hw_access(adev))
525 if (offset < adev->rmmio_size)
526 writeb(value, adev->rmmio + offset);
532 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
534 * @adev: amdgpu_device pointer
535 * @reg: dword aligned register offset
536 * @v: 32 bit value to write to the register
537 * @acc_flags: access flags which require special behavior
539 * Writes the value specified to the offset specified.
541 void amdgpu_device_wreg(struct amdgpu_device *adev,
542 uint32_t reg, uint32_t v,
545 if (amdgpu_device_skip_hw_access(adev))
548 if ((reg * 4) < adev->rmmio_size) {
549 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
550 amdgpu_sriov_runtime(adev) &&
551 down_read_trylock(&adev->reset_domain->sem)) {
552 amdgpu_kiq_wreg(adev, reg, v);
553 up_read(&adev->reset_domain->sem);
555 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
558 adev->pcie_wreg(adev, reg * 4, v);
561 trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
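/*
 * Illustrative sketch, not part of the driver: most callers don't use these
 * helpers directly but go through the RREG32()/WREG32() macro family, which
 * wraps amdgpu_device_rreg()/amdgpu_device_wreg(). A typical read-modify-write,
 * with SOME_BIT standing in for a real register field:
 *
 *   u32 tmp = RREG32(reg);
 *
 *   tmp |= SOME_BIT;
 *   WREG32(reg, tmp);
 */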
565 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
567 * @adev: amdgpu_device pointer
568 * @reg: mmio/rlc register
571 * this function is invoked only for the debugfs register access
573 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
574 uint32_t reg, uint32_t v)
576 if (amdgpu_device_skip_hw_access(adev))
579 if (amdgpu_sriov_fullaccess(adev) &&
580 adev->gfx.rlc.funcs &&
581 adev->gfx.rlc.funcs->is_rlcg_access_range) {
582 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
583 return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
584 } else if ((reg * 4) >= adev->rmmio_size) {
585 adev->pcie_wreg(adev, reg * 4, v);
587 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
592 * amdgpu_mm_rdoorbell - read a doorbell dword
594 * @adev: amdgpu_device pointer
595 * @index: doorbell index
597 * Returns the value in the doorbell aperture at the
598 * requested doorbell index (CIK).
600 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
602 if (amdgpu_device_skip_hw_access(adev))
605 if (index < adev->doorbell.num_kernel_doorbells) {
606 return readl(adev->doorbell.ptr + index);
608 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
614 * amdgpu_mm_wdoorbell - write a doorbell dword
616 * @adev: amdgpu_device pointer
617 * @index: doorbell index
620 * Writes @v to the doorbell aperture at the
621 * requested doorbell index (CIK).
623 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
625 if (amdgpu_device_skip_hw_access(adev))
628 if (index < adev->doorbell.num_kernel_doorbells) {
629 writel(v, adev->doorbell.ptr + index);
631 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
636 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
638 * @adev: amdgpu_device pointer
639 * @index: doorbell index
641 * Returns the value in the doorbell aperture at the
642 * requested doorbell index (VEGA10+).
644 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
646 if (amdgpu_device_skip_hw_access(adev))
649 if (index < adev->doorbell.num_kernel_doorbells) {
650 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
652 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
658 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
660 * @adev: amdgpu_device pointer
661 * @index: doorbell index
664 * Writes @v to the doorbell aperture at the
665 * requested doorbell index (VEGA10+).
667 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
669 if (amdgpu_device_skip_hw_access(adev))
672 if (index < adev->doorbell.num_kernel_doorbells) {
673 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
675 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
680 * amdgpu_device_indirect_rreg - read an indirect register
682 * @adev: amdgpu_device pointer
683 * @reg_addr: indirect register address to read from
685 * Returns the value of indirect register @reg_addr
687 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
690 unsigned long flags, pcie_index, pcie_data;
691 void __iomem *pcie_index_offset;
692 void __iomem *pcie_data_offset;
695 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
696 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
698 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
699 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
700 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
702 writel(reg_addr, pcie_index_offset);
703 readl(pcie_index_offset);
704 r = readl(pcie_data_offset);
705 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
710 u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
713 unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
715 void __iomem *pcie_index_offset;
716 void __iomem *pcie_index_hi_offset;
717 void __iomem *pcie_data_offset;
719 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
720 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
721 if (adev->nbio.funcs->get_pcie_index_hi_offset)
722 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
726 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
727 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
728 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
729 if (pcie_index_hi != 0)
730 pcie_index_hi_offset = (void __iomem *)adev->rmmio +
733 writel(reg_addr, pcie_index_offset);
734 readl(pcie_index_offset);
735 if (pcie_index_hi != 0) {
736 writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
737 readl(pcie_index_hi_offset);
739 r = readl(pcie_data_offset);
741 /* clear the high bits */
742 if (pcie_index_hi != 0) {
743 writel(0, pcie_index_hi_offset);
744 readl(pcie_index_hi_offset);
747 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
753 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
755 * @adev: amdgpu_device pointer
756 * @reg_addr: indirect register address to read from
758 * Returns the value of indirect register @reg_addr
760 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
763 unsigned long flags, pcie_index, pcie_data;
764 void __iomem *pcie_index_offset;
765 void __iomem *pcie_data_offset;
768 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
769 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
771 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
772 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
773 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
775 /* read low 32 bits */
776 writel(reg_addr, pcie_index_offset);
777 readl(pcie_index_offset);
778 r = readl(pcie_data_offset);
779 /* read high 32 bits */
780 writel(reg_addr + 4, pcie_index_offset);
781 readl(pcie_index_offset);
782 r |= ((u64)readl(pcie_data_offset) << 32);
783 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
789 * amdgpu_device_indirect_wreg - write to an indirect register
791 * @adev: amdgpu_device pointer
792 * @reg_addr: indirect register offset
793 * @reg_data: indirect register data
796 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
797 u32 reg_addr, u32 reg_data)
799 unsigned long flags, pcie_index, pcie_data;
800 void __iomem *pcie_index_offset;
801 void __iomem *pcie_data_offset;
803 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
804 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
806 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
807 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
808 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
810 writel(reg_addr, pcie_index_offset);
811 readl(pcie_index_offset);
812 writel(reg_data, pcie_data_offset);
813 readl(pcie_data_offset);
814 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
817 void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
818 u64 reg_addr, u32 reg_data)
820 unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
821 void __iomem *pcie_index_offset;
822 void __iomem *pcie_index_hi_offset;
823 void __iomem *pcie_data_offset;
825 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
826 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
827 if (adev->nbio.funcs->get_pcie_index_hi_offset)
828 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
832 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
833 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
834 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
835 if (pcie_index_hi != 0)
836 pcie_index_hi_offset = (void __iomem *)adev->rmmio +
839 writel(reg_addr, pcie_index_offset);
840 readl(pcie_index_offset);
841 if (pcie_index_hi != 0) {
842 writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
843 readl(pcie_index_hi_offset);
845 writel(reg_data, pcie_data_offset);
846 readl(pcie_data_offset);
848 /* clear the high bits */
849 if (pcie_index_hi != 0) {
850 writel(0, pcie_index_hi_offset);
851 readl(pcie_index_hi_offset);
854 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
858 * amdgpu_device_indirect_wreg64 - write to a 64 bit indirect register
860 * @adev: amdgpu_device pointer
861 * @reg_addr: indirect register offset
862 * @reg_data: indirect register data
865 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
866 u32 reg_addr, u64 reg_data)
868 unsigned long flags, pcie_index, pcie_data;
869 void __iomem *pcie_index_offset;
870 void __iomem *pcie_data_offset;
872 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
873 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
875 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
876 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
877 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
879 /* write low 32 bits */
880 writel(reg_addr, pcie_index_offset);
881 readl(pcie_index_offset);
882 writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
883 readl(pcie_data_offset);
884 /* write high 32 bits */
885 writel(reg_addr + 4, pcie_index_offset);
886 readl(pcie_index_offset);
887 writel((u32)(reg_data >> 32), pcie_data_offset);
888 readl(pcie_data_offset);
889 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
893 * amdgpu_device_get_rev_id - query device rev_id
895 * @adev: amdgpu_device pointer
897 * Returns the device rev_id.
899 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
901 return adev->nbio.funcs->get_rev_id(adev);
905 * amdgpu_invalid_rreg - dummy reg read function
907 * @adev: amdgpu_device pointer
908 * @reg: offset of register
910 * Dummy register read function. Used for register blocks
911 * that certain asics don't have (all asics).
912 * Returns the value in the register.
914 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
916 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
921 static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
923 DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
929 * amdgpu_invalid_wreg - dummy reg write function
931 * @adev: amdgpu_device pointer
932 * @reg: offset of register
933 * @v: value to write to the register
935 * Dummy register write function. Used for register blocks
936 * that certain asics don't have (all asics).
938 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
940 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
945 static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
947 DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
953 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
955 * @adev: amdgpu_device pointer
956 * @reg: offset of register
958 * Dummy register read function. Used for register blocks
959 * that certain asics don't have (all asics).
960 * Returns the value in the register.
962 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
964 DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
970 * amdgpu_invalid_wreg64 - dummy reg write function
972 * @adev: amdgpu_device pointer
973 * @reg: offset of register
974 * @v: value to write to the register
976 * Dummy register write function. Used for register blocks
977 * that certain asics don't have (all asics).
979 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
981 DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
987 * amdgpu_block_invalid_rreg - dummy reg read function
989 * @adev: amdgpu_device pointer
990 * @block: offset of instance
991 * @reg: offset of register
993 * Dummy register read function. Used for register blocks
994 * that certain asics don't have (all asics).
995 * Returns the value in the register.
997 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
998 uint32_t block, uint32_t reg)
1000 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
1007 * amdgpu_block_invalid_wreg - dummy reg write function
1009 * @adev: amdgpu_device pointer
1010 * @block: offset of instance
1011 * @reg: offset of register
1012 * @v: value to write to the register
1014 * Dummy register write function. Used for register blocks
1015 * that certain asics don't have (all asics).
1017 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
1019 uint32_t reg, uint32_t v)
1021 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
1027 * amdgpu_device_asic_init - Wrapper for atom asic_init
1029 * @adev: amdgpu_device pointer
1031 * Does any asic specific work and then calls atom asic init.
1033 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
1035 amdgpu_asic_pre_asic_init(adev);
1037 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) ||
1038 adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
1039 return amdgpu_atomfirmware_asic_init(adev, true);
1041 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
1045 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
1047 * @adev: amdgpu_device pointer
1049 * Allocates a scratch page of VRAM for use by various things in the
1052 static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
1054 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
1055 AMDGPU_GEM_DOMAIN_VRAM |
1056 AMDGPU_GEM_DOMAIN_GTT,
1057 &adev->mem_scratch.robj,
1058 &adev->mem_scratch.gpu_addr,
1059 (void **)&adev->mem_scratch.ptr);
1063 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
1065 * @adev: amdgpu_device pointer
1067 * Frees the VRAM scratch page.
1069 static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
1071 amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
1075 * amdgpu_device_program_register_sequence - program an array of registers.
1077 * @adev: amdgpu_device pointer
1078 * @registers: pointer to the register array
1079 * @array_size: size of the register array
1081 * Programs an array of registers with AND and OR masks.
1082 * This is a helper for setting golden registers.
1084 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
1085 const u32 *registers,
1086 const u32 array_size)
1088 u32 tmp, reg, and_mask, or_mask;
1094 for (i = 0; i < array_size; i += 3) {
1095 reg = registers[i + 0];
1096 and_mask = registers[i + 1];
1097 or_mask = registers[i + 2];
1099 if (and_mask == 0xffffffff) {
1104 if (adev->family >= AMDGPU_FAMILY_AI)
1105 tmp |= (or_mask & and_mask);
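/*
 * Illustrative sketch, not part of the driver: the register array is a flat
 * list of {offset, and_mask, or_mask} triplets; mmREG_A/mmREG_B and the mask
 * values below are placeholders:
 *
 *   static const u32 golden_settings[] = {
 *           mmREG_A, 0xffffffff, 0x00000001,
 *           mmREG_B, 0x0000ff00, 0x00001200,
 *   };
 *
 *   amdgpu_device_program_register_sequence(adev, golden_settings,
 *                                           ARRAY_SIZE(golden_settings));
 */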
1114 * amdgpu_device_pci_config_reset - reset the GPU
1116 * @adev: amdgpu_device pointer
1118 * Resets the GPU using the pci config reset sequence.
1119 * Only applicable to asics prior to vega10.
1121 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1123 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1127 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1129 * @adev: amdgpu_device pointer
1131 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1133 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1135 return pci_reset_function(adev->pdev);
1139 * GPU doorbell aperture helpers function.
1142 * amdgpu_device_doorbell_init - Init doorbell driver information.
1144 * @adev: amdgpu_device pointer
1146 * Init doorbell driver information (CIK)
1147 * Returns 0 on success, error on failure.
1149 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1152 /* No doorbell on SI hardware generation */
1153 if (adev->asic_type < CHIP_BONAIRE) {
1154 adev->doorbell.base = 0;
1155 adev->doorbell.size = 0;
1156 adev->doorbell.num_kernel_doorbells = 0;
1157 adev->doorbell.ptr = NULL;
1161 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1164 amdgpu_asic_init_doorbell_index(adev);
1166 /* doorbell bar mapping */
1167 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1168 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1170 if (adev->enable_mes) {
1171 adev->doorbell.num_kernel_doorbells =
1172 adev->doorbell.size / sizeof(u32);
1174 adev->doorbell.num_kernel_doorbells =
1175 min_t(u32, adev->doorbell.size / sizeof(u32),
1176 adev->doorbell_index.max_assignment+1);
1177 if (adev->doorbell.num_kernel_doorbells == 0)
1180 /* For Vega, reserve and map two pages on doorbell BAR since SDMA
1181 * paging queue doorbell use the second page. The
1182 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1183 * doorbells are in the first page. So with paging queue enabled,
1184 * the max num_kernel_doorbells should be increased by one page (0x400 dwords)
1186 if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(4, 0, 0) &&
1187 adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(4, 2, 0))
1188 adev->doorbell.num_kernel_doorbells += 0x400;
1191 adev->doorbell.ptr = ioremap(adev->doorbell.base,
1192 adev->doorbell.num_kernel_doorbells *
1194 if (adev->doorbell.ptr == NULL)
1201 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1203 * @adev: amdgpu_device pointer
1205 * Tear down doorbell driver information (CIK)
1207 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1209 iounmap(adev->doorbell.ptr);
1210 adev->doorbell.ptr = NULL;
1216 * amdgpu_device_wb_*()
1217 * Writeback is the method by which the GPU updates special pages in memory
1218 * with the status of certain GPU events (fences, ring pointers, etc.).
1222 * amdgpu_device_wb_fini - Disable Writeback and free memory
1224 * @adev: amdgpu_device pointer
1226 * Disables Writeback and frees the Writeback memory (all asics).
1227 * Used at driver shutdown.
1229 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1231 if (adev->wb.wb_obj) {
1232 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1234 (void **)&adev->wb.wb);
1235 adev->wb.wb_obj = NULL;
1240 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1242 * @adev: amdgpu_device pointer
1244 * Initializes writeback and allocates writeback memory (all asics).
1245 * Used at driver startup.
1246 * Returns 0 on success or a negative error code on failure.
1248 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1252 if (adev->wb.wb_obj == NULL) {
1253 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1254 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1255 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1256 &adev->wb.wb_obj, &adev->wb.gpu_addr,
1257 (void **)&adev->wb.wb);
1259 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1263 adev->wb.num_wb = AMDGPU_MAX_WB;
1264 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1266 /* clear wb memory */
1267 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1274 * amdgpu_device_wb_get - Allocate a wb entry
1276 * @adev: amdgpu_device pointer
1279 * Allocate a wb slot for use by the driver (all asics).
1280 * Returns 0 on success or -EINVAL on failure.
1282 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1284 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1286 if (offset < adev->wb.num_wb) {
1287 __set_bit(offset, adev->wb.used);
1288 *wb = offset << 3; /* convert to dw offset */
1296 * amdgpu_device_wb_free - Free a wb entry
1298 * @adev: amdgpu_device pointer
1301 * Free a wb slot allocated for use by the driver (all asics)
1303 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1306 if (wb < adev->wb.num_wb)
1307 __clear_bit(wb, adev->wb.used);
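/*
 * Illustrative sketch, not part of the driver: typical lifetime of a
 * writeback slot. The returned @wb is a dword offset, so the CPU and GPU
 * addresses of the slot are derived as below:
 *
 *   u32 wb;
 *
 *   if (!amdgpu_device_wb_get(adev, &wb)) {
 *           volatile u32 *cpu_addr = &adev->wb.wb[wb];
 *           u64 gpu_addr = adev->wb.gpu_addr + wb * 4;
 *
 *           ... point the GPU at gpu_addr, poll *cpu_addr from the CPU ...
 *           amdgpu_device_wb_free(adev, wb);
 *   }
 */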
1311 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1313 * @adev: amdgpu_device pointer
1315 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1316 * to fail, but if any of the BARs is not accessible after the resize we abort
1317 * driver loading by returning -ENODEV.
1319 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1321 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1322 struct pci_bus *root;
1323 struct resource *res;
1329 if (amdgpu_sriov_vf(adev))
1332 /* skip if the bios has already enabled large BAR */
1333 if (adev->gmc.real_vram_size &&
1334 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1337 /* Check if the root BUS has 64bit memory resources */
1338 root = adev->pdev->bus;
1339 while (root->parent)
1340 root = root->parent;
1342 pci_bus_for_each_resource(root, res, i) {
1343 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1344 res->start > 0x100000000ull)
1348 /* Trying to resize is pointless without a root hub window above 4GB */
1352 /* Limit the BAR size to what is available */
1353 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1356 /* Disable memory decoding while we change the BAR addresses and size */
1357 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1358 pci_write_config_word(adev->pdev, PCI_COMMAND,
1359 cmd & ~PCI_COMMAND_MEMORY);
1361 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1362 amdgpu_device_doorbell_fini(adev);
1363 if (adev->asic_type >= CHIP_BONAIRE)
1364 pci_release_resource(adev->pdev, 2);
1366 pci_release_resource(adev->pdev, 0);
1368 r = pci_resize_resource(adev->pdev, 0, rbar_size);
1370 DRM_INFO("Not enough PCI address space for a large BAR.");
1371 else if (r && r != -ENOTSUPP)
1372 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1374 pci_assign_unassigned_bus_resources(adev->pdev->bus);
1376 /* When the doorbell or fb BAR isn't available we have no chance of
1379 r = amdgpu_device_doorbell_init(adev);
1380 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1383 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1388 static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
1390 if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU)) {
1398 * GPU helpers function.
1401 * amdgpu_device_need_post - check if the hw need post or not
1403 * @adev: amdgpu_device pointer
1405 * Check if the asic has been initialized (all asics) at driver startup,
1406 * or if post is needed because a hw reset was performed.
1407 * Returns true if post is needed, false if not.
1409 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1413 if (amdgpu_sriov_vf(adev))
1416 if (!amdgpu_device_read_bios(adev))
1419 if (amdgpu_passthrough(adev)) {
1420 * for FIJI: In the whole GPU pass-through virtualization case, after a VM
1421 * reboot some old SMC firmware still needs the driver to do a vPost,
1422 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
1423 * this flaw, so we force vPost to be executed for SMC versions below 22.15.
1425 if (adev->asic_type == CHIP_FIJI) {
1428 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1429 /* force vPost if an error occurred */
1433 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1434 if (fw_ver < 0x00160e00)
1439 /* Don't post if we need to reset whole hive on init */
1440 if (adev->gmc.xgmi.pending_reset)
1443 if (adev->has_hw_reset) {
1444 adev->has_hw_reset = false;
1448 /* bios scratch used on CIK+ */
1449 if (adev->asic_type >= CHIP_BONAIRE)
1450 return amdgpu_atombios_scratch_need_asic_init(adev);
1452 /* check MEM_SIZE for older asics */
1453 reg = amdgpu_asic_get_config_memsize(adev);
1455 if ((reg != 0) && (reg != 0xffffffff))
1462 * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
1463 * speed switching. Until we have confirmation from Intel that a specific host
1464 * supports it, it's safer to keep it disabled for all.
1466 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
1467 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
1469 bool amdgpu_device_pcie_dynamic_switching_supported(void)
1471 #if IS_ENABLED(CONFIG_X86)
1472 struct cpuinfo_x86 *c = &cpu_data(0);
1474 if (c->x86_vendor == X86_VENDOR_INTEL)
1481 * amdgpu_device_should_use_aspm - check if the device should program ASPM
1483 * @adev: amdgpu_device pointer
1485 * Confirm whether the module parameter and pcie bridge agree that ASPM should
1486 * be set for this device.
1488 * Returns true if it should be used or false if not.
1490 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1492 switch (amdgpu_aspm) {
1502 return pcie_aspm_enabled(adev->pdev);
1505 bool amdgpu_device_aspm_support_quirk(void)
1507 #if IS_ENABLED(CONFIG_X86)
1508 struct cpuinfo_x86 *c = &cpu_data(0);
1510 return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
1516 /* if we get transitioned to only one device, take VGA back */
1518 * amdgpu_device_vga_set_decode - enable/disable vga decode
1520 * @pdev: PCI device pointer
1521 * @state: enable/disable vga decode
1523 * Enable/disable vga decode (all asics).
1524 * Returns VGA resource flags.
1526 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1529 struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1530 amdgpu_asic_set_vga_state(adev, state);
1532 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1533 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1535 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1539 * amdgpu_device_check_block_size - validate the vm block size
1541 * @adev: amdgpu_device pointer
1543 * Validates the vm block size specified via module parameter.
1544 * The vm block size defines the number of bits in page table versus page directory,
1545 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1546 * page table and the remaining bits are in the page directory.
1548 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1550 /* defines number of bits in page table versus page directory,
1551 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1552 * page table and the remaining bits are in the page directory */
1553 if (amdgpu_vm_block_size == -1)
1556 if (amdgpu_vm_block_size < 9) {
1557 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1558 amdgpu_vm_block_size);
1559 amdgpu_vm_block_size = -1;
1564 * amdgpu_device_check_vm_size - validate the vm size
1566 * @adev: amdgpu_device pointer
1568 * Validates the vm size in GB specified via module parameter.
1569 * The VM size is the size of the GPU virtual memory space in GB.
1571 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1573 /* no need to check the default value */
1574 if (amdgpu_vm_size == -1)
1577 if (amdgpu_vm_size < 1) {
1578 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1580 amdgpu_vm_size = -1;
1584 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1587 bool is_os_64 = (sizeof(void *) == 8);
1588 uint64_t total_memory;
1589 uint64_t dram_size_seven_GB = 0x1B8000000;
1590 uint64_t dram_size_three_GB = 0xB8000000;
1592 if (amdgpu_smu_memory_pool_size == 0)
1596 DRM_WARN("Not 64-bit OS, feature not supported\n");
1600 total_memory = (uint64_t)si.totalram * si.mem_unit;
1602 if ((amdgpu_smu_memory_pool_size == 1) ||
1603 (amdgpu_smu_memory_pool_size == 2)) {
1604 if (total_memory < dram_size_three_GB)
1606 } else if ((amdgpu_smu_memory_pool_size == 4) ||
1607 (amdgpu_smu_memory_pool_size == 8)) {
1608 if (total_memory < dram_size_seven_GB)
1611 DRM_WARN("Smu memory pool size not supported\n");
1614 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1619 DRM_WARN("No enough system memory\n");
1621 adev->pm.smu_prv_buffer_size = 0;
1624 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1626 if (!(adev->flags & AMD_IS_APU) ||
1627 adev->asic_type < CHIP_RAVEN)
1630 switch (adev->asic_type) {
1632 if (adev->pdev->device == 0x15dd)
1633 adev->apu_flags |= AMD_APU_IS_RAVEN;
1634 if (adev->pdev->device == 0x15d8)
1635 adev->apu_flags |= AMD_APU_IS_PICASSO;
1638 if ((adev->pdev->device == 0x1636) ||
1639 (adev->pdev->device == 0x164c))
1640 adev->apu_flags |= AMD_APU_IS_RENOIR;
1642 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1645 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1647 case CHIP_YELLOW_CARP:
1649 case CHIP_CYAN_SKILLFISH:
1650 if ((adev->pdev->device == 0x13FE) ||
1651 (adev->pdev->device == 0x143F))
1652 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1662 * amdgpu_device_check_arguments - validate module params
1664 * @adev: amdgpu_device pointer
1666 * Validates certain module parameters and updates
1667 * the associated values used by the driver (all asics).
1669 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1671 if (amdgpu_sched_jobs < 4) {
1672 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1674 amdgpu_sched_jobs = 4;
1675 } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1676 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1678 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1681 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1682 /* gart size must be greater or equal to 32M */
1683 dev_warn(adev->dev, "gart size (%d) too small\n",
1685 amdgpu_gart_size = -1;
1688 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1689 /* gtt size must be greater or equal to 32M */
1690 dev_warn(adev->dev, "gtt size (%d) too small\n",
1692 amdgpu_gtt_size = -1;
1695 /* valid range is between 4 and 9 inclusive */
1696 if (amdgpu_vm_fragment_size != -1 &&
1697 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1698 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1699 amdgpu_vm_fragment_size = -1;
1702 if (amdgpu_sched_hw_submission < 2) {
1703 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1704 amdgpu_sched_hw_submission);
1705 amdgpu_sched_hw_submission = 2;
1706 } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1707 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1708 amdgpu_sched_hw_submission);
1709 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1712 if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1713 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1714 amdgpu_reset_method = -1;
1717 amdgpu_device_check_smu_prv_buffer_size(adev);
1719 amdgpu_device_check_vm_size(adev);
1721 amdgpu_device_check_block_size(adev);
1723 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1729 * amdgpu_switcheroo_set_state - set switcheroo state
1731 * @pdev: pci dev pointer
1732 * @state: vga_switcheroo state
1734 * Callback for the switcheroo driver. Suspends or resumes
1735 * the asics before or after it is powered up using ACPI methods.
1737 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1738 enum vga_switcheroo_state state)
1740 struct drm_device *dev = pci_get_drvdata(pdev);
1743 if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1746 if (state == VGA_SWITCHEROO_ON) {
1747 pr_info("switched on\n");
1748 /* don't suspend or resume card normally */
1749 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1751 pci_set_power_state(pdev, PCI_D0);
1752 amdgpu_device_load_pci_state(pdev);
1753 r = pci_enable_device(pdev);
1755 DRM_WARN("pci_enable_device failed (%d)\n", r);
1756 amdgpu_device_resume(dev, true);
1758 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1760 pr_info("switched off\n");
1761 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1762 amdgpu_device_suspend(dev, true);
1763 amdgpu_device_cache_pci_state(pdev);
1764 /* Shut down the device */
1765 pci_disable_device(pdev);
1766 pci_set_power_state(pdev, PCI_D3cold);
1767 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1772 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1774 * @pdev: pci dev pointer
1776 * Callback for the switcheroo driver. Check if the switcheroo
1777 * state can be changed.
1778 * Returns true if the state can be changed, false if not.
1780 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1782 struct drm_device *dev = pci_get_drvdata(pdev);
1785 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1786 * locking inversion with the driver load path. And the access here is
1787 * completely racy anyway. So don't bother with locking for now.
1789 return atomic_read(&dev->open_count) == 0;
1792 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1793 .set_gpu_state = amdgpu_switcheroo_set_state,
1795 .can_switch = amdgpu_switcheroo_can_switch,
1799 * amdgpu_device_ip_set_clockgating_state - set the CG state
1801 * @dev: amdgpu_device pointer
1802 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1803 * @state: clockgating state (gate or ungate)
1805 * Sets the requested clockgating state for all instances of
1806 * the hardware IP specified.
1807 * Returns the error code from the last instance.
1809 int amdgpu_device_ip_set_clockgating_state(void *dev,
1810 enum amd_ip_block_type block_type,
1811 enum amd_clockgating_state state)
1813 struct amdgpu_device *adev = dev;
1816 for (i = 0; i < adev->num_ip_blocks; i++) {
1817 if (!adev->ip_blocks[i].status.valid)
1819 if (adev->ip_blocks[i].version->type != block_type)
1821 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1823 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1824 (void *)adev, state);
1826 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1827 adev->ip_blocks[i].version->funcs->name, r);
1833 * amdgpu_device_ip_set_powergating_state - set the PG state
1835 * @dev: amdgpu_device pointer
1836 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1837 * @state: powergating state (gate or ungate)
1839 * Sets the requested powergating state for all instances of
1840 * the hardware IP specified.
1841 * Returns the error code from the last instance.
1843 int amdgpu_device_ip_set_powergating_state(void *dev,
1844 enum amd_ip_block_type block_type,
1845 enum amd_powergating_state state)
1847 struct amdgpu_device *adev = dev;
1850 for (i = 0; i < adev->num_ip_blocks; i++) {
1851 if (!adev->ip_blocks[i].status.valid)
1853 if (adev->ip_blocks[i].version->type != block_type)
1855 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1857 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1858 (void *)adev, state);
1860 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1861 adev->ip_blocks[i].version->funcs->name, r);
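/*
 * Illustrative sketch, not part of the driver: gating the clocks or power of
 * every GFX IP instance from power management code might look like:
 *
 *   amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *                                          AMD_CG_STATE_GATE);
 *   amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *                                          AMD_PG_STATE_GATE);
 */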
1867 * amdgpu_device_ip_get_clockgating_state - get the CG state
1869 * @adev: amdgpu_device pointer
1870 * @flags: clockgating feature flags
1872 * Walks the list of IPs on the device and updates the clockgating
1873 * flags for each IP.
1874 * Updates @flags with the feature flags for each hardware IP where
1875 * clockgating is enabled.
1877 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1882 for (i = 0; i < adev->num_ip_blocks; i++) {
1883 if (!adev->ip_blocks[i].status.valid)
1885 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1886 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1891 * amdgpu_device_ip_wait_for_idle - wait for idle
1893 * @adev: amdgpu_device pointer
1894 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1896 * Waits for the requested hardware IP to be idle.
1897 * Returns 0 for success or a negative error code on failure.
1899 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1900 enum amd_ip_block_type block_type)
1904 for (i = 0; i < adev->num_ip_blocks; i++) {
1905 if (!adev->ip_blocks[i].status.valid)
1907 if (adev->ip_blocks[i].version->type == block_type) {
1908 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1919 * amdgpu_device_ip_is_idle - is the hardware IP idle
1921 * @adev: amdgpu_device pointer
1922 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1924 * Check if the hardware IP is idle or not.
1925 * Returns true if the IP is idle, false if not.
1927 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1928 enum amd_ip_block_type block_type)
1932 for (i = 0; i < adev->num_ip_blocks; i++) {
1933 if (!adev->ip_blocks[i].status.valid)
1935 if (adev->ip_blocks[i].version->type == block_type)
1936 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1943 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1945 * @adev: amdgpu_device pointer
1946 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1948 * Returns a pointer to the hardware IP block structure
1949 * if it exists for the asic, otherwise NULL.
1951 struct amdgpu_ip_block *
1952 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1953 enum amd_ip_block_type type)
1957 for (i = 0; i < adev->num_ip_blocks; i++)
1958 if (adev->ip_blocks[i].version->type == type)
1959 return &adev->ip_blocks[i];
1965 * amdgpu_device_ip_block_version_cmp
1967 * @adev: amdgpu_device pointer
1968 * @type: enum amd_ip_block_type
1969 * @major: major version
1970 * @minor: minor version
1972 * Returns 0 if the registered IP block version is equal to or greater than
1973 * the requested version, 1 if it is smaller or the ip_block doesn't exist.
1975 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1976 enum amd_ip_block_type type,
1977 u32 major, u32 minor)
1979 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1981 if (ip_block && ((ip_block->version->major > major) ||
1982 ((ip_block->version->major == major) &&
1983 (ip_block->version->minor >= minor))))
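/*
 * Illustrative sketch, not part of the driver: callers treat a zero return
 * as "the IP block is at least the requested version", e.g.:
 *
 *   if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
 *                                          7, 0) == 0) {
 *           ... the SMU IP block is version 7.0 or newer ...
 *   }
 */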
1990 * amdgpu_device_ip_block_add
1992 * @adev: amdgpu_device pointer
1993 * @ip_block_version: pointer to the IP to add
1995 * Adds the IP block driver information to the collection of IPs
1998 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1999 const struct amdgpu_ip_block_version *ip_block_version)
2001 if (!ip_block_version)
2004 switch (ip_block_version->type) {
2005 case AMD_IP_BLOCK_TYPE_VCN:
2006 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
2009 case AMD_IP_BLOCK_TYPE_JPEG:
2010 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
2017 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
2018 ip_block_version->funcs->name);
2020 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
2026 * amdgpu_device_enable_virtual_display - enable virtual display feature
2028 * @adev: amdgpu_device pointer
2030 * Enables the virtual display feature if the user has enabled it via
2031 * the module parameter virtual_display. This feature provides a virtual
2032 * display hardware on headless boards or in virtualized environments.
2033 * This function parses and validates the configuration string specified by
2034 * the user and configures the virtual display configuration (number of
2035 * virtual connectors, crtcs, etc.) specified.
2037 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
2039 adev->enable_virtual_display = false;
2041 if (amdgpu_virtual_display) {
2042 const char *pci_address_name = pci_name(adev->pdev);
2043 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
2045 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
2046 pciaddstr_tmp = pciaddstr;
2047 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
2048 pciaddname = strsep(&pciaddname_tmp, ",");
2049 if (!strcmp("all", pciaddname)
2050 || !strcmp(pci_address_name, pciaddname)) {
2054 adev->enable_virtual_display = true;
2057 res = kstrtol(pciaddname_tmp, 10,
2065 adev->mode_info.num_crtc = num_crtc;
2067 adev->mode_info.num_crtc = 1;
2073 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
2074 amdgpu_virtual_display, pci_address_name,
2075 adev->enable_virtual_display, adev->mode_info.num_crtc);
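/*
 * Illustrative sketch, not part of the driver: the module parameter is a
 * semicolon separated list of <PCI address>,<crtc count> entries, where the
 * address "all" matches every device, e.g.:
 *
 *   modprobe amdgpu virtual_display=0000:04:00.0,2
 *
 * would expose two virtual crtcs on the device at 0000:04:00.0.
 */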
2081 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
2083 if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
2084 adev->mode_info.num_crtc = 1;
2085 adev->enable_virtual_display = true;
2086 DRM_INFO("virtual_display:%d, num_crtc:%d\n",
2087 adev->enable_virtual_display, adev->mode_info.num_crtc);
2092 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2094 * @adev: amdgpu_device pointer
2096 * Parses the asic configuration parameters specified in the gpu info
2097 * firmware and makes them available to the driver for use in configuring
2099 * Returns 0 on success, -EINVAL on failure.
2101 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
2103 const char *chip_name;
2106 const struct gpu_info_firmware_header_v1_0 *hdr;
2108 adev->firmware.gpu_info_fw = NULL;
2110 if (adev->mman.discovery_bin) {
2112 * FIXME: The bounding box is still needed by Navi12, so
2113 * temporarily read it from gpu_info firmware. Should be dropped
2114 * when DAL no longer needs it.
2116 if (adev->asic_type != CHIP_NAVI12)
2120 switch (adev->asic_type) {
2124 chip_name = "vega10";
2127 chip_name = "vega12";
2130 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2131 chip_name = "raven2";
2132 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
2133 chip_name = "picasso";
2135 chip_name = "raven";
2138 chip_name = "arcturus";
2141 chip_name = "navi12";
2145 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
2146 err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
2149 "Failed to get gpu_info firmware \"%s\"\n",
2154 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2155 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2157 switch (hdr->version_major) {
2160 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2161 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2162 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2165 * Should be dropped when DAL no longer needs it.
2167 if (adev->asic_type == CHIP_NAVI12)
2168 goto parse_soc_bounding_box;
2170 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2171 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2172 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2173 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2174 adev->gfx.config.max_texture_channel_caches =
2175 le32_to_cpu(gpu_info_fw->gc_num_tccs);
2176 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2177 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2178 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2179 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2180 adev->gfx.config.double_offchip_lds_buf =
2181 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2182 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2183 adev->gfx.cu_info.max_waves_per_simd =
2184 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2185 adev->gfx.cu_info.max_scratch_slots_per_cu =
2186 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2187 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2188 if (hdr->version_minor >= 1) {
2189 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2190 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2191 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2192 adev->gfx.config.num_sc_per_sh =
2193 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2194 adev->gfx.config.num_packer_per_sc =
2195 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2198 parse_soc_bounding_box:
2200 * soc bounding box info is not integrated in discovery table,
2201 * we always need to parse it from gpu info firmware if needed.
2203 if (hdr->version_minor == 2) {
2204 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2205 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2206 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2207 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2213 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2222 * amdgpu_device_ip_early_init - run early init for hardware IPs
2224 * @adev: amdgpu_device pointer
2226 * Early initialization pass for hardware IPs. The hardware IPs that make
2227	 * up each asic are discovered and each IP's early_init callback is run. This
2228 * is the first stage in initializing the asic.
2229 * Returns 0 on success, negative error code on failure.
2231 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2233 struct drm_device *dev = adev_to_drm(adev);
2234 struct pci_dev *parent;
2238 amdgpu_device_enable_virtual_display(adev);
2240 if (amdgpu_sriov_vf(adev)) {
2241 r = amdgpu_virt_request_full_gpu(adev, true);
2246 switch (adev->asic_type) {
2247 #ifdef CONFIG_DRM_AMDGPU_SI
2253 adev->family = AMDGPU_FAMILY_SI;
2254 r = si_set_ip_blocks(adev);
2259 #ifdef CONFIG_DRM_AMDGPU_CIK
2265 if (adev->flags & AMD_IS_APU)
2266 adev->family = AMDGPU_FAMILY_KV;
2268 adev->family = AMDGPU_FAMILY_CI;
2270 r = cik_set_ip_blocks(adev);
2278 case CHIP_POLARIS10:
2279 case CHIP_POLARIS11:
2280 case CHIP_POLARIS12:
2284 if (adev->flags & AMD_IS_APU)
2285 adev->family = AMDGPU_FAMILY_CZ;
2287 adev->family = AMDGPU_FAMILY_VI;
2289 r = vi_set_ip_blocks(adev);
2294 r = amdgpu_discovery_set_ip_blocks(adev);
2300 if (amdgpu_has_atpx() &&
2301 (amdgpu_is_atpx_hybrid() ||
2302 amdgpu_has_atpx_dgpu_power_cntl()) &&
2303 ((adev->flags & AMD_IS_APU) == 0) &&
2304 !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2305 adev->flags |= AMD_IS_PX;
2307 if (!(adev->flags & AMD_IS_APU)) {
2308 parent = pci_upstream_bridge(adev->pdev);
2309 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2313 adev->pm.pp_feature = amdgpu_pp_feature_mask;
2314 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2315 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2316 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2317 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2320 for (i = 0; i < adev->num_ip_blocks; i++) {
2321 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2322 DRM_WARN("disabled ip block: %d <%s>\n",
2323 i, adev->ip_blocks[i].version->funcs->name);
2324 adev->ip_blocks[i].status.valid = false;
2326 if (adev->ip_blocks[i].version->funcs->early_init) {
2327 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2329 adev->ip_blocks[i].status.valid = false;
2331 DRM_ERROR("early_init of IP block <%s> failed %d\n",
2332 adev->ip_blocks[i].version->funcs->name, r);
2335 adev->ip_blocks[i].status.valid = true;
2338 adev->ip_blocks[i].status.valid = true;
2341 /* get the vbios after the asic_funcs are set up */
2342 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2343 r = amdgpu_device_parse_gpu_info_fw(adev);
2348 if (amdgpu_device_read_bios(adev)) {
2349 if (!amdgpu_get_bios(adev))
2352 r = amdgpu_atombios_init(adev);
2354 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2355 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2360	/* get pf2vf msg info at its earliest time */
2361 if (amdgpu_sriov_vf(adev))
2362 amdgpu_virt_init_data_exchange(adev);
2369 amdgpu_amdkfd_device_probe(adev);
2370 adev->cg_flags &= amdgpu_cg_mask;
2371 adev->pg_flags &= amdgpu_pg_mask;
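/*
 * Illustrative example (module parameter name assumed from the mask
 * variable above): amdgpu_ip_block_mask treats bit i as "IP block i is
 * enabled", so a boot such as:
 *
 *	modprobe amdgpu ip_block_mask=0xfffffffd
 *
 * would clear bit 1, making the loop above mark block 1 invalid and
 * log "disabled ip block: 1 <name>". All bits set (the default) leaves
 * every discovered block valid.
 */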
2376 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2380 for (i = 0; i < adev->num_ip_blocks; i++) {
2381 if (!adev->ip_blocks[i].status.sw)
2383 if (adev->ip_blocks[i].status.hw)
2385 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2386 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2387 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2388 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2390 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2391 adev->ip_blocks[i].version->funcs->name, r);
2394 adev->ip_blocks[i].status.hw = true;
2401 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2405 for (i = 0; i < adev->num_ip_blocks; i++) {
2406 if (!adev->ip_blocks[i].status.sw)
2408 if (adev->ip_blocks[i].status.hw)
2410 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2412 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2413 adev->ip_blocks[i].version->funcs->name, r);
2416 adev->ip_blocks[i].status.hw = true;
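/*
 * Editorial note: hw init is deliberately two-phased. Phase 1 only
 * brings up COMMON, IH and, under SR-IOV, PSP, so that firmware
 * loading has working interrupt/PSP paths; phase 2 then initializes
 * all remaining blocks. The expected call order, as used by
 * amdgpu_device_ip_init() below, is:
 *
 *	r = amdgpu_device_ip_hw_init_phase1(adev);
 *	if (!r)
 *		r = amdgpu_device_fw_loading(adev);
 *	if (!r)
 *		r = amdgpu_device_ip_hw_init_phase2(adev);
 */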
2422 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2426 uint32_t smu_version;
2428 if (adev->asic_type >= CHIP_VEGA10) {
2429 for (i = 0; i < adev->num_ip_blocks; i++) {
2430 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2433 if (!adev->ip_blocks[i].status.sw)
2436	/* no need to do the fw loading again if already done */
2437	if (adev->ip_blocks[i].status.hw)
2440 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2441 r = adev->ip_blocks[i].version->funcs->resume(adev);
2443 DRM_ERROR("resume of IP block <%s> failed %d\n",
2444 adev->ip_blocks[i].version->funcs->name, r);
2448 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2450 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2451 adev->ip_blocks[i].version->funcs->name, r);
2456 adev->ip_blocks[i].status.hw = true;
2461 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2462 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2467 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2472 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2473 struct amdgpu_ring *ring = adev->rings[i];
2475	/* No need to set up the GPU scheduler for rings that don't need it */
2476 if (!ring || ring->no_scheduler)
2479 switch (ring->funcs->type) {
2480 case AMDGPU_RING_TYPE_GFX:
2481 timeout = adev->gfx_timeout;
2483 case AMDGPU_RING_TYPE_COMPUTE:
2484 timeout = adev->compute_timeout;
2486 case AMDGPU_RING_TYPE_SDMA:
2487 timeout = adev->sdma_timeout;
2490 timeout = adev->video_timeout;
2494 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2495 ring->num_hw_submission, 0,
2496 timeout, adev->reset_domain->wq,
2497 ring->sched_score, ring->name,
2500 DRM_ERROR("Failed to create scheduler on ring %s.\n",
2506 amdgpu_xcp_update_partition_sched_list(adev);
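/*
 * Illustrative sketch of the per-ring scheduler setup above (rings
 * with no_scheduler set, e.g. KIQ-style rings, are skipped): the ring
 * type selects one of the four timeout module parameters, roughly:
 *
 *	long timeout = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ?
 *		       adev->compute_timeout : adev->gfx_timeout;
 *
 *	r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
 *			   ring->num_hw_submission, 0, timeout,
 *			   adev->reset_domain->wq, ring->sched_score,
 *			   ring->name, adev->dev);
 *
 * (Condensed from the switch above; SDMA and video rings use their own
 * timeouts in the real code.)
 */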
2513 * amdgpu_device_ip_init - run init for hardware IPs
2515 * @adev: amdgpu_device pointer
2517 * Main initialization pass for hardware IPs. The list of all the hardware
2518 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2519 * are run. sw_init initializes the software state associated with each IP
2520 * and hw_init initializes the hardware associated with each IP.
2521 * Returns 0 on success, negative error code on failure.
2523 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2527 r = amdgpu_ras_init(adev);
2531 for (i = 0; i < adev->num_ip_blocks; i++) {
2532 if (!adev->ip_blocks[i].status.valid)
2534 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2536 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2537 adev->ip_blocks[i].version->funcs->name, r);
2540 adev->ip_blocks[i].status.sw = true;
2542 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2543 /* need to do common hw init early so everything is set up for gmc */
2544 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2546 DRM_ERROR("hw_init %d failed %d\n", i, r);
2549 adev->ip_blocks[i].status.hw = true;
2550 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2551 /* need to do gmc hw init early so we can allocate gpu mem */
2552 /* Try to reserve bad pages early */
2553 if (amdgpu_sriov_vf(adev))
2554 amdgpu_virt_exchange_data(adev);
2556 r = amdgpu_device_mem_scratch_init(adev);
2558 DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
2561 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2563 DRM_ERROR("hw_init %d failed %d\n", i, r);
2566 r = amdgpu_device_wb_init(adev);
2568 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2571 adev->ip_blocks[i].status.hw = true;
2573 /* right after GMC hw init, we create CSA */
2574 if (adev->gfx.mcbp) {
2575 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2576 AMDGPU_GEM_DOMAIN_VRAM |
2577 AMDGPU_GEM_DOMAIN_GTT,
2580 DRM_ERROR("allocate CSA failed %d\n", r);
2587 if (amdgpu_sriov_vf(adev))
2588 amdgpu_virt_init_data_exchange(adev);
2590 r = amdgpu_ib_pool_init(adev);
2592 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2593 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2597 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2601 r = amdgpu_device_ip_hw_init_phase1(adev);
2605 r = amdgpu_device_fw_loading(adev);
2609 r = amdgpu_device_ip_hw_init_phase2(adev);
2614	 * retired pages will be loaded from eeprom and reserved here;
2615	 * this should be called after amdgpu_device_ip_hw_init_phase2 since
2616	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2617	 * functional for I2C communication, which is only true at this point.
2619	 * amdgpu_ras_recovery_init may fail, but the upper layers only care
2620	 * about failures caused by a bad gpu state and stop the amdgpu init
2621	 * process accordingly. For other failure cases it still releases all
2622	 * the resources and prints an error message, rather than returning a
2623	 * negative value to the upper level.
2625	 * Note: theoretically, this should be called before all vram allocations
2626	 * to protect retired pages from being abused.
2628 r = amdgpu_ras_recovery_init(adev);
2633 * In case of XGMI grab extra reference for reset domain for this device
2635 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2636 if (amdgpu_xgmi_add_device(adev) == 0) {
2637 if (!amdgpu_sriov_vf(adev)) {
2638 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2640 if (WARN_ON(!hive)) {
2645 if (!hive->reset_domain ||
2646 !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2648 amdgpu_put_xgmi_hive(hive);
2652 /* Drop the early temporary reset domain we created for device */
2653 amdgpu_reset_put_reset_domain(adev->reset_domain);
2654 adev->reset_domain = hive->reset_domain;
2655 amdgpu_put_xgmi_hive(hive);
2660 r = amdgpu_device_init_schedulers(adev);
2664	/* Don't init kfd if the whole hive needs to be reset during init */
2665 if (!adev->gmc.xgmi.pending_reset) {
2666 kgd2kfd_init_zone_device(adev);
2667 amdgpu_amdkfd_device_init(adev);
2670 amdgpu_fru_get_product_info(adev);
2678 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2680 * @adev: amdgpu_device pointer
2682 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2683 * this function before a GPU reset. If the value is retained after a
2684	 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2686 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2688 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2692 * amdgpu_device_check_vram_lost - check if vram is valid
2694 * @adev: amdgpu_device pointer
2696 * Checks the reset magic value written to the gart pointer in VRAM.
2697 * The driver calls this after a GPU reset to see if the contents of
2698	 * VRAM were lost or not.
2699	 * Returns true if vram is lost, false if not.
2701 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2703 if (memcmp(adev->gart.ptr, adev->reset_magic,
2704 AMDGPU_RESET_MAGIC_NUM))
2707 if (!amdgpu_in_reset(adev))
2711 * For all ASICs with baco/mode1 reset, the VRAM is
2712 * always assumed to be lost.
2714 switch (amdgpu_asic_reset_method(adev)) {
2715 case AMD_RESET_METHOD_BACO:
2716 case AMD_RESET_METHOD_MODE1:
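/*
 * Worked example (editorial): pairing the two helpers around a reset.
 * fill_reset_magic() snapshots the first AMDGPU_RESET_MAGIC_NUM bytes
 * of the GART page; check_vram_lost() memcmp()s them afterwards:
 *
 *	amdgpu_device_fill_reset_magic(adev);
 *	... perform the ASIC reset ...
 *	if (amdgpu_device_check_vram_lost(adev))
 *		handle_vram_loss(adev);	// hypothetical recovery path
 *
 * BACO and mode1 resets are treated as lossy regardless of the magic
 * comparison, as the switch above shows.
 */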
2724 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2726 * @adev: amdgpu_device pointer
2727 * @state: clockgating state (gate or ungate)
2729 * The list of all the hardware IPs that make up the asic is walked and the
2730 * set_clockgating_state callbacks are run.
2731	 * Late initialization pass: enables clockgating for hardware IPs.
2732	 * Fini or suspend pass: disables clockgating for hardware IPs.
2733 * Returns 0 on success, negative error code on failure.
2736 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2737 enum amd_clockgating_state state)
2741 if (amdgpu_emu_mode == 1)
2744 for (j = 0; j < adev->num_ip_blocks; j++) {
2745 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2746 if (!adev->ip_blocks[i].status.late_initialized)
2748 /* skip CG for GFX, SDMA on S0ix */
2749 if (adev->in_s0ix &&
2750 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2751 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2753 /* skip CG for VCE/UVD, it's handled specially */
2754 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2755 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2756 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2757 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2758 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2759 /* enable clockgating to save power */
2760 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2763 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2764 adev->ip_blocks[i].version->funcs->name, r);
2773 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2774 enum amd_powergating_state state)
2778 if (amdgpu_emu_mode == 1)
2781 for (j = 0; j < adev->num_ip_blocks; j++) {
2782 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2783 if (!adev->ip_blocks[i].status.late_initialized)
2785 /* skip PG for GFX, SDMA on S0ix */
2786 if (adev->in_s0ix &&
2787 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2788 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2790	/* skip PG for VCE/UVD, it's handled specially */
2791 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2792 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2793 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2794 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2795 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2796 /* enable powergating to save power */
2797 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2800 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2801 adev->ip_blocks[i].version->funcs->name, r);
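/*
 * Editorial note on ordering: gating walks the IP list front to back,
 * ungating walks it back to front, so power/clock dependencies are
 * torn down in the reverse of the order they were set up. The index
 * flip above boils down to:
 *
 *	i = (state == AMD_PG_STATE_GATE) ? j : adev->num_ip_blocks - j - 1;
 *
 * e.g. with 4 blocks, gate visits 0,1,2,3 and ungate visits 3,2,1,0.
 */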
2809 static int amdgpu_device_enable_mgpu_fan_boost(void)
2811 struct amdgpu_gpu_instance *gpu_ins;
2812 struct amdgpu_device *adev;
2815 mutex_lock(&mgpu_info.mutex);
2818	 * MGPU fan boost feature should be enabled
2819	 * only when there are two or more dGPUs in the system.
2822 if (mgpu_info.num_dgpu < 2)
2825 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2826 gpu_ins = &(mgpu_info.gpu_ins[i]);
2827 adev = gpu_ins->adev;
2828 if (!(adev->flags & AMD_IS_APU) &&
2829 !gpu_ins->mgpu_fan_enabled) {
2830 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2834 gpu_ins->mgpu_fan_enabled = 1;
2839 mutex_unlock(&mgpu_info.mutex);
2845 * amdgpu_device_ip_late_init - run late init for hardware IPs
2847 * @adev: amdgpu_device pointer
2849 * Late initialization pass for hardware IPs. The list of all the hardware
2850 * IPs that make up the asic is walked and the late_init callbacks are run.
2851 * late_init covers any special initialization that an IP requires
2852	 * after all of them have been initialized or something that needs to happen
2853 * late in the init process.
2854 * Returns 0 on success, negative error code on failure.
2856 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2858 struct amdgpu_gpu_instance *gpu_instance;
2861 for (i = 0; i < adev->num_ip_blocks; i++) {
2862 if (!adev->ip_blocks[i].status.hw)
2864 if (adev->ip_blocks[i].version->funcs->late_init) {
2865 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2867 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2868 adev->ip_blocks[i].version->funcs->name, r);
2872 adev->ip_blocks[i].status.late_initialized = true;
2875 r = amdgpu_ras_late_init(adev);
2877 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2881 amdgpu_ras_set_error_query_ready(adev, true);
2883 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2884 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2886 amdgpu_device_fill_reset_magic(adev);
2888 r = amdgpu_device_enable_mgpu_fan_boost();
2890 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2892	/* For passthrough configurations on arcturus and aldebaran, enable special SBR handling */
2893 if (amdgpu_passthrough(adev) &&
2894 ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2895 adev->asic_type == CHIP_ALDEBARAN))
2896 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2898 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2899 mutex_lock(&mgpu_info.mutex);
2902	 * Reset device p-state to low as this was booted with high.
2904	 * This should be performed only after all devices from the same
2905	 * hive are initialized.
2907	 * However, it's unknown in advance how many devices are in the hive,
2908	 * as they are counted one by one during device initialization.
2910	 * So, we wait until all XGMI interlinked devices are initialized.
2911	 * This may bring some delay as those devices may come from
2912	 * different hives. But that should be OK.
2914 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2915 for (i = 0; i < mgpu_info.num_gpu; i++) {
2916 gpu_instance = &(mgpu_info.gpu_ins[i]);
2917 if (gpu_instance->adev->flags & AMD_IS_APU)
2920 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2921 AMDGPU_XGMI_PSTATE_MIN);
2923 DRM_ERROR("pstate setting failed (%d).\n", r);
2929 mutex_unlock(&mgpu_info.mutex);
2936 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2938 * @adev: amdgpu_device pointer
2940	 * For ASICs that need to disable the SMC first
2942 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2946 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2949 for (i = 0; i < adev->num_ip_blocks; i++) {
2950 if (!adev->ip_blocks[i].status.hw)
2952 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2953 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2954 /* XXX handle errors */
2956 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2957 adev->ip_blocks[i].version->funcs->name, r);
2959 adev->ip_blocks[i].status.hw = false;
2965 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2969 for (i = 0; i < adev->num_ip_blocks; i++) {
2970 if (!adev->ip_blocks[i].version->funcs->early_fini)
2973 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2975 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2976 adev->ip_blocks[i].version->funcs->name, r);
2980 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2981 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2983 amdgpu_amdkfd_suspend(adev, false);
2985	/* Workaround for ASICs that need to disable the SMC first */
2986 amdgpu_device_smu_fini_early(adev);
2988 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2989 if (!adev->ip_blocks[i].status.hw)
2992 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2993 /* XXX handle errors */
2995 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2996 adev->ip_blocks[i].version->funcs->name, r);
2999 adev->ip_blocks[i].status.hw = false;
3002 if (amdgpu_sriov_vf(adev)) {
3003 if (amdgpu_virt_release_full_gpu(adev, false))
3004 DRM_ERROR("failed to release exclusive mode on fini\n");
3011 * amdgpu_device_ip_fini - run fini for hardware IPs
3013 * @adev: amdgpu_device pointer
3015 * Main teardown pass for hardware IPs. The list of all the hardware
3016 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
3017 * are run. hw_fini tears down the hardware associated with each IP
3018 * and sw_fini tears down any software state associated with each IP.
3019 * Returns 0 on success, negative error code on failure.
3021 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
3025 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
3026 amdgpu_virt_release_ras_err_handler_data(adev);
3028 if (adev->gmc.xgmi.num_physical_nodes > 1)
3029 amdgpu_xgmi_remove_device(adev);
3031 amdgpu_amdkfd_device_fini_sw(adev);
3033 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3034 if (!adev->ip_blocks[i].status.sw)
3037 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
3038 amdgpu_ucode_free_bo(adev);
3039 amdgpu_free_static_csa(&adev->virt.csa_obj);
3040 amdgpu_device_wb_fini(adev);
3041 amdgpu_device_mem_scratch_fini(adev);
3042 amdgpu_ib_pool_fini(adev);
3045 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
3046 /* XXX handle errors */
3048 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
3049 adev->ip_blocks[i].version->funcs->name, r);
3051 adev->ip_blocks[i].status.sw = false;
3052 adev->ip_blocks[i].status.valid = false;
3055 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3056 if (!adev->ip_blocks[i].status.late_initialized)
3058 if (adev->ip_blocks[i].version->funcs->late_fini)
3059 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
3060 adev->ip_blocks[i].status.late_initialized = false;
3063 amdgpu_ras_fini(adev);
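/*
 * Teardown-order sketch (editorial): fini mirrors init in reverse.
 * sw_fini walks the blocks from last to first, and the GMC block is
 * used as the hook for freeing GMC-dependent allocations (ucode BO,
 * static CSA, writeback, mem scratch, IB pool) just before its own
 * sw_fini runs:
 *
 *	for (i = adev->num_ip_blocks - 1; i >= 0; i--)
 *		adev->ip_blocks[i].version->funcs->sw_fini(adev);
 *
 * A second reverse pass then runs late_fini and clears the
 * late_initialized flags.
 */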
3069 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
3071 * @work: work_struct.
3073 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
3075 struct amdgpu_device *adev =
3076 container_of(work, struct amdgpu_device, delayed_init_work.work);
3079 r = amdgpu_ib_ring_tests(adev);
3081 DRM_ERROR("ib ring test failed (%d).\n", r);
3084 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
3086 struct amdgpu_device *adev =
3087 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
3089 WARN_ON_ONCE(adev->gfx.gfx_off_state);
3090 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
3092 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
3093 adev->gfx.gfx_off_state = true;
3097 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3099 * @adev: amdgpu_device pointer
3101 * Main suspend function for hardware IPs. The list of all the hardware
3102 * IPs that make up the asic is walked, clockgating is disabled and the
3103 * suspend callbacks are run. suspend puts the hardware and software state
3104 * in each IP into a state suitable for suspend.
3105 * Returns 0 on success, negative error code on failure.
3107 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
3111 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3112 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3115	 * Per the PMFW team's suggestion, the driver needs to handle disabling
3116	 * the gfxoff and df cstate features for the gpu reset (e.g. Mode1Reset)
3117	 * scenario. Add the missing df cstate disablement here.
3119 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
3120 dev_warn(adev->dev, "Failed to disallow df cstate");
3122 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3123 if (!adev->ip_blocks[i].status.valid)
3126 /* displays are handled separately */
3127 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
3131 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3132 /* XXX handle errors */
3134 DRM_ERROR("suspend of IP block <%s> failed %d\n",
3135 adev->ip_blocks[i].version->funcs->name, r);
3139 adev->ip_blocks[i].status.hw = false;
3146 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3148 * @adev: amdgpu_device pointer
3150 * Main suspend function for hardware IPs. The list of all the hardware
3151 * IPs that make up the asic is walked, clockgating is disabled and the
3152 * suspend callbacks are run. suspend puts the hardware and software state
3153 * in each IP into a state suitable for suspend.
3154 * Returns 0 on success, negative error code on failure.
3156 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3161 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3163 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3164 if (!adev->ip_blocks[i].status.valid)
3166 /* displays are handled in phase1 */
3167 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3169 /* PSP lost connection when err_event_athub occurs */
3170 if (amdgpu_ras_intr_triggered() &&
3171 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3172 adev->ip_blocks[i].status.hw = false;
3176	/* skip unnecessary suspend if we have not initialized them yet */
3177 if (adev->gmc.xgmi.pending_reset &&
3178 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3179 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3180 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3181 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3182 adev->ip_blocks[i].status.hw = false;
3186 /* skip suspend of gfx/mes and psp for S0ix
3187 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3188	 * like at runtime. PSP is also part of the always-on hardware
3189 * so no need to suspend it.
3191 if (adev->in_s0ix &&
3192 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3193 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3194 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3197 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3198 if (adev->in_s0ix &&
3199 (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
3200 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
3203	/* During cold boot, PSP provides the IMU and RLC FW binaries to TOS.
3204	 * These live in the TMR and are expected to be reused by PSP-TOS to
3205	 * reload from that location; RLC autoload is likewise loaded from
3206	 * there based on the PMFW -> PSP message during the re-init sequence.
3207	 * Therefore, psp suspend & resume should be skipped to avoid destroying
3208	 * the TMR and reloading the FWs again for IMU-enabled APU ASICs.
3210 if (amdgpu_in_reset(adev) &&
3211 (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3212 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3216 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3217 /* XXX handle errors */
3219 DRM_ERROR("suspend of IP block <%s> failed %d\n",
3220 adev->ip_blocks[i].version->funcs->name, r);
3222 adev->ip_blocks[i].status.hw = false;
3223 /* handle putting the SMC in the appropriate state */
3224 if (!amdgpu_sriov_vf(adev)) {
3225 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3226 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3228 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3229 adev->mp1_state, r);
3240 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3242 * @adev: amdgpu_device pointer
3244 * Main suspend function for hardware IPs. The list of all the hardware
3245 * IPs that make up the asic is walked, clockgating is disabled and the
3246 * suspend callbacks are run. suspend puts the hardware and software state
3247 * in each IP into a state suitable for suspend.
3248 * Returns 0 on success, negative error code on failure.
3250 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3254 if (amdgpu_sriov_vf(adev)) {
3255 amdgpu_virt_fini_data_exchange(adev);
3256 amdgpu_virt_request_full_gpu(adev, false);
3259 r = amdgpu_device_ip_suspend_phase1(adev);
3262 r = amdgpu_device_ip_suspend_phase2(adev);
3264 if (amdgpu_sriov_vf(adev))
3265 amdgpu_virt_release_full_gpu(adev, false);
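/*
 * Usage sketch (editorial): under SR-IOV the suspend phases are
 * bracketed by a full-GPU request so the host does not preempt the VF
 * mid-teardown; bare metal simply skips the virt calls:
 *
 *	amdgpu_virt_request_full_gpu(adev, false);
 *	amdgpu_device_ip_suspend_phase1(adev);	// displays first
 *	amdgpu_device_ip_suspend_phase2(adev);	// everything else
 *	amdgpu_virt_release_full_gpu(adev, false);
 */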
3270 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3274 static enum amd_ip_block_type ip_order[] = {
3275 AMD_IP_BLOCK_TYPE_COMMON,
3276 AMD_IP_BLOCK_TYPE_GMC,
3277 AMD_IP_BLOCK_TYPE_PSP,
3278 AMD_IP_BLOCK_TYPE_IH,
3281 for (i = 0; i < adev->num_ip_blocks; i++) {
3283 struct amdgpu_ip_block *block;
3285 block = &adev->ip_blocks[i];
3286 block->status.hw = false;
3288 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3290 if (block->version->type != ip_order[j] ||
3291 !block->status.valid)
3294 r = block->version->funcs->hw_init(adev);
3295 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3298 block->status.hw = true;
3305 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3309 static enum amd_ip_block_type ip_order[] = {
3310 AMD_IP_BLOCK_TYPE_SMC,
3311 AMD_IP_BLOCK_TYPE_DCE,
3312 AMD_IP_BLOCK_TYPE_GFX,
3313 AMD_IP_BLOCK_TYPE_SDMA,
3314 AMD_IP_BLOCK_TYPE_MES,
3315 AMD_IP_BLOCK_TYPE_UVD,
3316 AMD_IP_BLOCK_TYPE_VCE,
3317 AMD_IP_BLOCK_TYPE_VCN,
3318 AMD_IP_BLOCK_TYPE_JPEG
3321 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3323 struct amdgpu_ip_block *block;
3325 for (j = 0; j < adev->num_ip_blocks; j++) {
3326 block = &adev->ip_blocks[j];
3328 if (block->version->type != ip_order[i] ||
3329 !block->status.valid ||
3333 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3334 r = block->version->funcs->resume(adev);
3336 r = block->version->funcs->hw_init(adev);
3338 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3341 block->status.hw = true;
3349 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3351 * @adev: amdgpu_device pointer
3353 * First resume function for hardware IPs. The list of all the hardware
3354 * IPs that make up the asic is walked and the resume callbacks are run for
3355 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3356 * after a suspend and updates the software state as necessary. This
3357 * function is also used for restoring the GPU after a GPU reset.
3358 * Returns 0 on success, negative error code on failure.
3360 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3364 for (i = 0; i < adev->num_ip_blocks; i++) {
3365 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3367 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3368 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3369 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3370 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3372 r = adev->ip_blocks[i].version->funcs->resume(adev);
3374 DRM_ERROR("resume of IP block <%s> failed %d\n",
3375 adev->ip_blocks[i].version->funcs->name, r);
3378 adev->ip_blocks[i].status.hw = true;
3386 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3388 * @adev: amdgpu_device pointer
3390	 * Second resume function for hardware IPs. The list of all the hardware
3391 * IPs that make up the asic is walked and the resume callbacks are run for
3392 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
3393 * functional state after a suspend and updates the software state as
3394	 * necessary. This function is also used for restoring the GPU after a GPU reset.
3396 * Returns 0 on success, negative error code on failure.
3398 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3402 for (i = 0; i < adev->num_ip_blocks; i++) {
3403 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3405 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3406 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3407 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3408 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3410 r = adev->ip_blocks[i].version->funcs->resume(adev);
3412 DRM_ERROR("resume of IP block <%s> failed %d\n",
3413 adev->ip_blocks[i].version->funcs->name, r);
3416 adev->ip_blocks[i].status.hw = true;
3423 * amdgpu_device_ip_resume - run resume for hardware IPs
3425 * @adev: amdgpu_device pointer
3427 * Main resume function for hardware IPs. The hardware IPs
3428 * are split into two resume functions because they are
3429	 * also used in recovering from a GPU reset and some additional
3430	 * steps need to be taken between them. In this case (S3/S4) they are
3431	 * run sequentially.
3432 * Returns 0 on success, negative error code on failure.
3434 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3438 if (!adev->in_s0ix) {
3439 r = amdgpu_amdkfd_resume_iommu(adev);
3444 r = amdgpu_device_ip_resume_phase1(adev);
3448 r = amdgpu_device_fw_loading(adev);
3452 r = amdgpu_device_ip_resume_phase2(adev);
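/*
 * Editorial sketch of the resume ordering implemented above:
 *
 *	amdgpu_device_ip_resume_phase1(adev);	// COMMON, GMC, IH (+PSP on VF)
 *	amdgpu_device_fw_loading(adev);		// PSP/SMU microcode
 *	amdgpu_device_ip_resume_phase2(adev);	// all remaining blocks
 *
 * Firmware loading has to sit between the phases because phase 2
 * blocks (GFX, SDMA, ...) expect their microcode to be resident.
 */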
3458 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3460 * @adev: amdgpu_device pointer
3462 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3464 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3466 if (amdgpu_sriov_vf(adev)) {
3467 if (adev->is_atom_fw) {
3468 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3469 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3471 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3472 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3475 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3476 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3481 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3483 * @asic_type: AMD asic type
3485	 * Check if there is DC (new modesetting infrastructure) support for an asic.
3486 * returns true if DC has support, false if not.
3488 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3490 switch (asic_type) {
3491 #ifdef CONFIG_DRM_AMDGPU_SI
3495 /* chips with no display hardware */
3497 #if defined(CONFIG_DRM_AMD_DC)
3503 * We have systems in the wild with these ASICs that require
3504 * LVDS and VGA support which is not supported with DC.
3506	 * Fall back to the non-DC driver here by default so as not to
3507 * cause regressions.
3509 #if defined(CONFIG_DRM_AMD_DC_SI)
3510 return amdgpu_dc > 0;
3519 * We have systems in the wild with these ASICs that require
3520 * VGA support which is not supported with DC.
3522	 * Fall back to the non-DC driver here by default so as not to
3523 * cause regressions.
3525 return amdgpu_dc > 0;
3527 return amdgpu_dc != 0;
3531 DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3532 "but isn't supported by ASIC, ignoring\n");
3539 * amdgpu_device_has_dc_support - check if dc is supported
3541 * @adev: amdgpu_device pointer
3543 * Returns true for supported, false for not supported
3545 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3547 if (adev->enable_virtual_display ||
3548 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3551 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3554 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3556 struct amdgpu_device *adev =
3557 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3558 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3560 /* It's a bug to not have a hive within this function */
3565 * Use task barrier to synchronize all xgmi reset works across the
3566 * hive. task_barrier_enter and task_barrier_exit will block
3567 * until all the threads running the xgmi reset works reach
3568 * those points. task_barrier_full will do both blocks.
3570 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3572 task_barrier_enter(&hive->tb);
3573 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3575 if (adev->asic_reset_res)
3578 task_barrier_exit(&hive->tb);
3579 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3581 if (adev->asic_reset_res)
3584 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3585 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3586 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3589 task_barrier_full(&hive->tb);
3590 adev->asic_reset_res = amdgpu_asic_reset(adev);
3594 if (adev->asic_reset_res)
3595 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3596 adev->asic_reset_res, adev_to_drm(adev)->unique);
3597 amdgpu_put_xgmi_hive(hive);
3600 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3602 char *input = amdgpu_lockup_timeout;
3603 char *timeout_setting = NULL;
3609	 * By default the timeout for non-compute jobs is 10000 ms
3610	 * and 60000 ms for compute jobs.
3611	 * In SR-IOV or passthrough mode, the timeout for compute
3612	 * jobs is 60000 ms by default.
3614 adev->gfx_timeout = msecs_to_jiffies(10000);
3615 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3616 if (amdgpu_sriov_vf(adev))
3617 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3618 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3620 adev->compute_timeout = msecs_to_jiffies(60000);
3622 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3623 while ((timeout_setting = strsep(&input, ",")) &&
3624 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3625 ret = kstrtol(timeout_setting, 0, &timeout);
3632 } else if (timeout < 0) {
3633 timeout = MAX_SCHEDULE_TIMEOUT;
3634 dev_warn(adev->dev, "lockup timeout disabled");
3635 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3637 timeout = msecs_to_jiffies(timeout);
3642 adev->gfx_timeout = timeout;
3645 adev->compute_timeout = timeout;
3648 adev->sdma_timeout = timeout;
3651 adev->video_timeout = timeout;
3658 * There is only one value specified and
3659 * it should apply to all non-compute jobs.
3662 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3663 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3664 adev->compute_timeout = adev->gfx_timeout;
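/*
 * Example (editorial; format inferred from the parser above): the
 * amdgpu.lockup_timeout parameter takes up to four comma-separated
 * millisecond values, applied in the order gfx,compute,sdma,video:
 *
 *	modprobe amdgpu lockup_timeout=10000,60000,10000,10000
 *	modprobe amdgpu lockup_timeout=5000	# one value: all non-compute
 *	modprobe amdgpu lockup_timeout=-1	# negative: disabled (taints)
 *
 * A single value also covers compute under SR-IOV or passthrough,
 * matching the fallback branch above.
 */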
3672 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3674 * @adev: amdgpu_device pointer
3676	 * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3678 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3680 struct iommu_domain *domain;
3682 domain = iommu_get_domain_for_dev(adev->dev);
3683 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3684 adev->ram_is_direct_mapped = true;
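/*
 * Editorial note: an identity (passthrough) IOMMU domain, or no IOMMU
 * at all, means DMA addresses equal physical addresses, which is what
 * ram_is_direct_mapped records. On x86, booting with iommu=pt is a
 * typical way to end up with IOMMU_DOMAIN_IDENTITY here.
 */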
3687 static const struct attribute *amdgpu_dev_attributes[] = {
3688 &dev_attr_product_name.attr,
3689 &dev_attr_product_number.attr,
3690 &dev_attr_serial_number.attr,
3691 &dev_attr_pcie_replay_count.attr,
3695 static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3697 if (amdgpu_mcbp == 1)
3698 adev->gfx.mcbp = true;
3700 if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&
3701 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&
3702 adev->gfx.num_gfx_rings)
3703 adev->gfx.mcbp = true;
3705 if (amdgpu_sriov_vf(adev))
3706 adev->gfx.mcbp = true;
3709 DRM_INFO("MCBP is enabled\n");
3713 * amdgpu_device_init - initialize the driver
3715 * @adev: amdgpu_device pointer
3716 * @flags: driver flags
3718 * Initializes the driver info and hw (all asics).
3719 * Returns 0 for success or an error on failure.
3720 * Called at driver startup.
3722 int amdgpu_device_init(struct amdgpu_device *adev,
3725 struct drm_device *ddev = adev_to_drm(adev);
3726 struct pci_dev *pdev = adev->pdev;
3732 adev->shutdown = false;
3733 adev->flags = flags;
3735 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3736 adev->asic_type = amdgpu_force_asic_type;
3738 adev->asic_type = flags & AMD_ASIC_MASK;
3740 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3741 if (amdgpu_emu_mode == 1)
3742 adev->usec_timeout *= 10;
3743 adev->gmc.gart_size = 512 * 1024 * 1024;
3744 adev->accel_working = false;
3745 adev->num_rings = 0;
3746 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3747 adev->mman.buffer_funcs = NULL;
3748 adev->mman.buffer_funcs_ring = NULL;
3749 adev->vm_manager.vm_pte_funcs = NULL;
3750 adev->vm_manager.vm_pte_num_scheds = 0;
3751 adev->gmc.gmc_funcs = NULL;
3752 adev->harvest_ip_mask = 0x0;
3753 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3754 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3756 adev->smc_rreg = &amdgpu_invalid_rreg;
3757 adev->smc_wreg = &amdgpu_invalid_wreg;
3758 adev->pcie_rreg = &amdgpu_invalid_rreg;
3759 adev->pcie_wreg = &amdgpu_invalid_wreg;
3760 adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
3761 adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
3762 adev->pciep_rreg = &amdgpu_invalid_rreg;
3763 adev->pciep_wreg = &amdgpu_invalid_wreg;
3764 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3765 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3766 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3767 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3768 adev->didt_rreg = &amdgpu_invalid_rreg;
3769 adev->didt_wreg = &amdgpu_invalid_wreg;
3770 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3771 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3772 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3773 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3775 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3776 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3777 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3779	/* mutex initialization is all done here so we
3780	 * can call these functions without locking issues */
3781 mutex_init(&adev->firmware.mutex);
3782 mutex_init(&adev->pm.mutex);
3783 mutex_init(&adev->gfx.gpu_clock_mutex);
3784 mutex_init(&adev->srbm_mutex);
3785 mutex_init(&adev->gfx.pipe_reserve_mutex);
3786 mutex_init(&adev->gfx.gfx_off_mutex);
3787 mutex_init(&adev->gfx.partition_mutex);
3788 mutex_init(&adev->grbm_idx_mutex);
3789 mutex_init(&adev->mn_lock);
3790 mutex_init(&adev->virt.vf_errors.lock);
3791 hash_init(adev->mn_hash);
3792 mutex_init(&adev->psp.mutex);
3793 mutex_init(&adev->notifier_lock);
3794 mutex_init(&adev->pm.stable_pstate_ctx_lock);
3795 mutex_init(&adev->benchmark_mutex);
3797 amdgpu_device_init_apu_flags(adev);
3799 r = amdgpu_device_check_arguments(adev);
3803 spin_lock_init(&adev->mmio_idx_lock);
3804 spin_lock_init(&adev->smc_idx_lock);
3805 spin_lock_init(&adev->pcie_idx_lock);
3806 spin_lock_init(&adev->uvd_ctx_idx_lock);
3807 spin_lock_init(&adev->didt_idx_lock);
3808 spin_lock_init(&adev->gc_cac_idx_lock);
3809 spin_lock_init(&adev->se_cac_idx_lock);
3810 spin_lock_init(&adev->audio_endpt_idx_lock);
3811 spin_lock_init(&adev->mm_stats.lock);
3813 INIT_LIST_HEAD(&adev->shadow_list);
3814 mutex_init(&adev->shadow_list_lock);
3816 INIT_LIST_HEAD(&adev->reset_list);
3818 INIT_LIST_HEAD(&adev->ras_list);
3820 INIT_DELAYED_WORK(&adev->delayed_init_work,
3821 amdgpu_device_delayed_init_work_handler);
3822 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3823 amdgpu_device_delay_enable_gfx_off);
3825 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3827 adev->gfx.gfx_off_req_count = 1;
3828 adev->gfx.gfx_off_residency = 0;
3829 adev->gfx.gfx_off_entrycount = 0;
3830 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3832 atomic_set(&adev->throttling_logging_enabled, 1);
3834 * If throttling continues, logging will be performed every minute
3835 * to avoid log flooding. "-1" is subtracted since the thermal
3836 * throttling interrupt comes every second. Thus, the total logging
3837	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3838 * for throttling interrupt) = 60 seconds.
3840 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3841 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3843 /* Registers mapping */
3844 /* TODO: block userspace mapping of io register */
3845 if (adev->asic_type >= CHIP_BONAIRE) {
3846 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3847 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3849 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3850 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3853 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3854 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3856 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3857	if (!adev->rmmio) {
3860 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3861 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3864	 * The reset domain needs to be present early, before the XGMI hive is
3865	 * discovered (if any) and initialized, so that the reset sem and in_gpu
3866	 * reset flag can be used early during init and before calling RREG32.
3868 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3869 if (!adev->reset_domain)
3872 /* detect hw virtualization here */
3873 amdgpu_detect_virtualization(adev);
3875 amdgpu_device_get_pcie_info(adev);
3877 r = amdgpu_device_get_job_timeout_settings(adev);
3879 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3883 /* early init functions */
3884 r = amdgpu_device_ip_early_init(adev);
3888 amdgpu_device_set_mcbp(adev);
3890 /* Get rid of things like offb */
3891 r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
3895 /* Enable TMZ based on IP_VERSION */
3896 amdgpu_gmc_tmz_set(adev);
3898 amdgpu_gmc_noretry_set(adev);
3899	/* Need to get xgmi info early to decide the reset behavior */
3900 if (adev->gmc.xgmi.supported) {
3901 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3906 /* enable PCIE atomic ops */
3907 if (amdgpu_sriov_vf(adev)) {
3908 if (adev->virt.fw_reserve.p_pf2vf)
3909 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3910 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3911 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3912	/* APUs with gfx9 onwards don't rely on PCIe atomics; rather, their
3913	 * internal path natively supports atomics, so set have_atomics_support to true.
3915 } else if ((adev->flags & AMD_IS_APU) &&
3916 (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))) {
3917 adev->have_atomics_support = true;
3919 adev->have_atomics_support =
3920 !pci_enable_atomic_ops_to_root(adev->pdev,
3921 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3922 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3925 if (!adev->have_atomics_support)
3926 dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3928	/* doorbell bar mapping and doorbell index init */
3929 amdgpu_device_doorbell_init(adev);
3931 if (amdgpu_emu_mode == 1) {
3932 /* post the asic on emulation mode */
3933 emu_soc_asic_init(adev);
3934 goto fence_driver_init;
3937 amdgpu_reset_init(adev);
3939	/* detect if we are running with an SR-IOV vbios */
3941 amdgpu_device_detect_sriov_bios(adev);
3943 /* check if we need to reset the asic
3944 * E.g., driver was not cleanly unloaded previously, etc.
3946 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3947 if (adev->gmc.xgmi.num_physical_nodes) {
3948 dev_info(adev->dev, "Pending hive reset.\n");
3949 adev->gmc.xgmi.pending_reset = true;
3950	/* Only need to init the necessary blocks for SMU to handle the reset */
3951 for (i = 0; i < adev->num_ip_blocks; i++) {
3952 if (!adev->ip_blocks[i].status.valid)
3954 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3955 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3956 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3957 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3958 DRM_DEBUG("IP %s disabled for hw_init.\n",
3959 adev->ip_blocks[i].version->funcs->name);
3960 adev->ip_blocks[i].status.hw = true;
3964 tmp = amdgpu_reset_method;
3965 /* It should do a default reset when loading or reloading the driver,
3966 * regardless of the module parameter reset_method.
3968 amdgpu_reset_method = AMD_RESET_METHOD_NONE;
3969 r = amdgpu_asic_reset(adev);
3970 amdgpu_reset_method = tmp;
3972 dev_err(adev->dev, "asic reset on init failed\n");
3978 /* Post card if necessary */
3979 if (amdgpu_device_need_post(adev)) {
3981 dev_err(adev->dev, "no vBIOS found\n");
3985 DRM_INFO("GPU posting now...\n");
3986 r = amdgpu_device_asic_init(adev);
3988 dev_err(adev->dev, "gpu post error!\n");
3994 if (adev->is_atom_fw) {
3995 /* Initialize clocks */
3996 r = amdgpu_atomfirmware_get_clock_info(adev);
3998 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3999 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4003 /* Initialize clocks */
4004 r = amdgpu_atombios_get_clock_info(adev);
4006 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
4007 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4010 /* init i2c buses */
4011 if (!amdgpu_device_has_dc_support(adev))
4012 amdgpu_atombios_i2c_init(adev);
4018 r = amdgpu_fence_driver_sw_init(adev);
4020 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
4021 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
4025 /* init the mode config */
4026 drm_mode_config_init(adev_to_drm(adev));
4028 r = amdgpu_device_ip_init(adev);
4030 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
4031 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
4032 goto release_ras_con;
4035 amdgpu_fence_driver_hw_init(adev);
4038 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
4039 adev->gfx.config.max_shader_engines,
4040 adev->gfx.config.max_sh_per_se,
4041 adev->gfx.config.max_cu_per_sh,
4042 adev->gfx.cu_info.number);
4044 adev->accel_working = true;
4046 amdgpu_vm_check_compute_bug(adev);
4048 /* Initialize the buffer migration limit. */
4049 if (amdgpu_moverate >= 0)
4050 max_MBps = amdgpu_moverate;
4052 max_MBps = 8; /* Allow 8 MB/s. */
4053 /* Get a log2 for easy divisions. */
4054 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
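/*
 * Worked example (editorial): with the 8 MB/s default,
 * log2_max_MBps = ilog2(8) = 3, so later accounting can replace a
 * division by the rate with a shift, e.g.:
 *
 *	quota_mb = bytes_moved >> 20;			// bytes -> MB
 *	secs = quota_mb >> adev->mm_stats.log2_max_MBps; // MB / (MB/s)
 *
 * (The consumer shown is a hypothetical sketch; only the field and the
 * shift-for-division trick come from the code above.)
 */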
4056 r = amdgpu_atombios_sysfs_init(adev);
4058 drm_err(&adev->ddev,
4059 "registering atombios sysfs failed (%d).\n", r);
4061 r = amdgpu_pm_sysfs_init(adev);
4063 DRM_ERROR("registering pm sysfs failed (%d).\n", r);
4065 r = amdgpu_ucode_sysfs_init(adev);
4067 adev->ucode_sysfs_en = false;
4068 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
4070 adev->ucode_sysfs_en = true;
4072 r = amdgpu_psp_sysfs_init(adev);
4074 adev->psp_sysfs_en = false;
4075 if (!amdgpu_sriov_vf(adev))
4076 DRM_ERROR("Creating psp sysfs failed\n");
4078 adev->psp_sysfs_en = true;
4081 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
4082	 * Otherwise the mgpu fan boost feature will be skipped because the
4083	 * gpu instance would not have been counted yet.
4085 amdgpu_register_gpu_instance(adev);
4087 /* enable clockgating, etc. after ib tests, etc. since some blocks require
4088 * explicit gating rather than handling it automatically.
4090 if (!adev->gmc.xgmi.pending_reset) {
4091 r = amdgpu_device_ip_late_init(adev);
4093 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4094 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4095 goto release_ras_con;
4098 amdgpu_ras_resume(adev);
4099 queue_delayed_work(system_wq, &adev->delayed_init_work,
4100 msecs_to_jiffies(AMDGPU_RESUME_MS));
4103 if (amdgpu_sriov_vf(adev)) {
4104 amdgpu_virt_release_full_gpu(adev, true);
4105 flush_delayed_work(&adev->delayed_init_work);
4108 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
4110 dev_err(adev->dev, "Could not create amdgpu device attr\n");
4112 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4113 r = amdgpu_pmu_init(adev);
4115 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
4117	/* Keep the stored pci config space at hand for restore after a sudden PCI error */
4118 if (amdgpu_device_cache_pci_state(adev->pdev))
4119 pci_restore_state(pdev);
4121 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
4122	/* this will fail for cards that aren't VGA class devices, just don't care */
4124 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4125 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4127 px = amdgpu_device_supports_px(ddev);
4129 if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
4130 apple_gmux_detect(NULL, NULL)))
4131 vga_switcheroo_register_client(adev->pdev,
4132 &amdgpu_switcheroo_ops, px);
4135 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4137 if (adev->gmc.xgmi.pending_reset)
4138 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
4139 msecs_to_jiffies(AMDGPU_RESUME_MS));
4141 amdgpu_device_check_iommu_direct_map(adev);
4146 if (amdgpu_sriov_vf(adev))
4147 amdgpu_virt_release_full_gpu(adev, true);
4149 /* failed in exclusive mode due to timeout */
4150 if (amdgpu_sriov_vf(adev) &&
4151 !amdgpu_sriov_runtime(adev) &&
4152 amdgpu_virt_mmio_blocked(adev) &&
4153 !amdgpu_virt_wait_reset(adev)) {
4154 dev_err(adev->dev, "VF exclusive mode timeout\n");
4155 /* Don't send request since VF is inactive. */
4156 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4157 adev->virt.ops = NULL;
4160 amdgpu_release_ras_context(adev);
4163 amdgpu_vf_error_trans_all(adev);
4168 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4171 /* Clear all CPU mappings pointing to this device */
4172 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4174 /* Unmap all mapped bars - Doorbell, registers and VRAM */
4175 amdgpu_device_doorbell_fini(adev);
4177 iounmap(adev->rmmio);
4179 if (adev->mman.aper_base_kaddr)
4180 iounmap(adev->mman.aper_base_kaddr);
4181 adev->mman.aper_base_kaddr = NULL;
4183 /* Memory manager related */
4184 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4185 arch_phys_wc_del(adev->gmc.vram_mtrr);
4186 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4191 * amdgpu_device_fini_hw - tear down the driver
4193 * @adev: amdgpu_device pointer
4195 * Tear down the driver info (all asics).
4196 * Called at driver shutdown.
4198 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4200 dev_info(adev->dev, "amdgpu: finishing device.\n");
4201 flush_delayed_work(&adev->delayed_init_work);
4202 adev->shutdown = true;
4204	/* make sure the IB tests have finished before entering exclusive mode
4205 * to avoid preemption on IB test
4207 if (amdgpu_sriov_vf(adev)) {
4208 amdgpu_virt_request_full_gpu(adev, false);
4209 amdgpu_virt_fini_data_exchange(adev);
4212 /* disable all interrupts */
4213 amdgpu_irq_disable_all(adev);
4214 if (adev->mode_info.mode_config_initialized) {
4215 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4216 drm_helper_force_disable_all(adev_to_drm(adev));
4218 drm_atomic_helper_shutdown(adev_to_drm(adev));
4220 amdgpu_fence_driver_hw_fini(adev);
4222 if (adev->mman.initialized)
4223 drain_workqueue(adev->mman.bdev.wq);
4225 if (adev->pm.sysfs_initialized)
4226 amdgpu_pm_sysfs_fini(adev);
4227 if (adev->ucode_sysfs_en)
4228 amdgpu_ucode_sysfs_fini(adev);
4229 if (adev->psp_sysfs_en)
4230 amdgpu_psp_sysfs_fini(adev);
4231 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4233	/* disabling the ras feature must come before hw fini */
4234 amdgpu_ras_pre_fini(adev);
4236 amdgpu_device_ip_fini_early(adev);
4238 amdgpu_irq_fini_hw(adev);
4240 if (adev->mman.initialized)
4241 ttm_device_clear_dma_mappings(&adev->mman.bdev);
4243 amdgpu_gart_dummy_page_fini(adev);
4245 if (drm_dev_is_unplugged(adev_to_drm(adev)))
4246 amdgpu_device_unmap_mmio(adev);
4250 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4255 amdgpu_fence_driver_sw_fini(adev);
4256 amdgpu_device_ip_fini(adev);
4257 amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4258 adev->accel_working = false;
4259 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4261 amdgpu_reset_fini(adev);
4263 /* free i2c buses */
4264 if (!amdgpu_device_has_dc_support(adev))
4265 amdgpu_i2c_fini(adev);
4267 if (amdgpu_emu_mode != 1)
4268 amdgpu_atombios_fini(adev);
4273 px = amdgpu_device_supports_px(adev_to_drm(adev));
4275 if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
4276 apple_gmux_detect(NULL, NULL)))
4277 vga_switcheroo_unregister_client(adev->pdev);
4280 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4282 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4283 vga_client_unregister(adev->pdev);
4285 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4287 iounmap(adev->rmmio);
4289 amdgpu_device_doorbell_fini(adev);
4293 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4294 amdgpu_pmu_fini(adev);
4295 if (adev->mman.discovery_bin)
4296 amdgpu_discovery_fini(adev);
4298 amdgpu_reset_put_reset_domain(adev->reset_domain);
4299 adev->reset_domain = NULL;
4301 kfree(adev->pci_state);
4306 * amdgpu_device_evict_resources - evict device resources
4307 * @adev: amdgpu device object
4309	 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4310	 * of the vram memory type. Mainly used for evicting device resources
4311	 * before device suspend.
4314 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4318 /* No need to evict vram on APUs for suspend to ram or s2idle */
4319 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4322 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4324 DRM_WARN("evicting device resources failed\n");
4332 * amdgpu_device_suspend - initiate device suspend
4334 * @dev: drm dev pointer
4335	 * @fbcon: notify the fbdev of suspend
4337 * Puts the hw in the suspend state (all asics).
4338 * Returns 0 for success or an error on failure.
4339 * Called at driver suspend.
4341 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4343 struct amdgpu_device *adev = drm_to_adev(dev);
4346 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4349 adev->in_suspend = true;
4351 /* Evict the majority of BOs before grabbing the full access */
4352 r = amdgpu_device_evict_resources(adev);
4356 if (amdgpu_sriov_vf(adev)) {
4357 amdgpu_virt_fini_data_exchange(adev);
4358 r = amdgpu_virt_request_full_gpu(adev, false);
4363 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4364 DRM_WARN("smart shift update failed\n");
4367 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4369 cancel_delayed_work_sync(&adev->delayed_init_work);
4371 amdgpu_ras_suspend(adev);
4373 amdgpu_device_ip_suspend_phase1(adev);
4376 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4378 r = amdgpu_device_evict_resources(adev);
4382 amdgpu_fence_driver_hw_fini(adev);
4384 amdgpu_device_ip_suspend_phase2(adev);
4386 if (amdgpu_sriov_vf(adev))
4387		amdgpu_virt_release_full_gpu(adev, false);
4393 * amdgpu_device_resume - initiate device resume
4395 * @dev: drm dev pointer
4396 * @fbcon: notify the fbdev of resume
4398 * Bring the hw back to operating state (all asics).
4399 * Returns 0 for success or an error on failure.
4400 * Called at driver resume.
4402 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4404 struct amdgpu_device *adev = drm_to_adev(dev);
4407 if (amdgpu_sriov_vf(adev)) {
4408 r = amdgpu_virt_request_full_gpu(adev, true);
4413 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4417 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4420 if (amdgpu_device_need_post(adev)) {
4421 r = amdgpu_device_asic_init(adev);
4423 dev_err(adev->dev, "amdgpu asic init failed\n");
4426 r = amdgpu_device_ip_resume(adev);
4429 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4432 amdgpu_fence_driver_hw_init(adev);
4434 r = amdgpu_device_ip_late_init(adev);
4438 queue_delayed_work(system_wq, &adev->delayed_init_work,
4439 msecs_to_jiffies(AMDGPU_RESUME_MS));
4441 if (!adev->in_s0ix) {
4442 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4448 if (amdgpu_sriov_vf(adev)) {
4449 amdgpu_virt_init_data_exchange(adev);
4450 amdgpu_virt_release_full_gpu(adev, true);
4456 /* Make sure IB tests flushed */
4457 flush_delayed_work(&adev->delayed_init_work);
4460 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4462 amdgpu_ras_resume(adev);
4464 if (adev->mode_info.num_crtc) {
4466 * Most of the connector probing functions try to acquire runtime pm
4467 * refs to ensure that the GPU is powered on when connector polling is
4468 * performed. Since we're calling this from a runtime PM callback,
4469 * trying to acquire rpm refs will cause us to deadlock.
4471 * Since we're guaranteed to be holding the rpm lock, it's safe to
4472 * temporarily disable the rpm helpers so this doesn't deadlock us.
4475 dev->dev->power.disable_depth++;
4477 if (!adev->dc_enabled)
4478 drm_helper_hpd_irq_event(dev);
4480 drm_kms_helper_hotplug_event(dev);
4482 dev->dev->power.disable_depth--;
4485 adev->in_suspend = false;
4487 if (adev->enable_mes)
4488 amdgpu_mes_self_test(adev);
4490 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4491 DRM_WARN("smart shift update failed\n");
4497 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4499 * @adev: amdgpu_device pointer
4501 * The list of all the hardware IPs that make up the asic is walked and
4502 * the check_soft_reset callbacks are run. check_soft_reset determines
4503 * if the asic is still hung or not.
4504 * Returns true if any of the IPs are still in a hung state, false if not.
4506 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4509 bool asic_hang = false;
4511 if (amdgpu_sriov_vf(adev))
4514 if (amdgpu_asic_need_full_reset(adev))
4517 for (i = 0; i < adev->num_ip_blocks; i++) {
4518 if (!adev->ip_blocks[i].status.valid)
4520 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4521 adev->ip_blocks[i].status.hang =
4522 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4523 if (adev->ip_blocks[i].status.hang) {
4524 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4532 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4534 * @adev: amdgpu_device pointer
4536 * The list of all the hardware IPs that make up the asic is walked and the
4537 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4538 * handles any IP specific hardware or software state changes that are
4539 * necessary for a soft reset to succeed.
4540 * Returns 0 on success, negative error code on failure.
4542 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4546 for (i = 0; i < adev->num_ip_blocks; i++) {
4547 if (!adev->ip_blocks[i].status.valid)
4549 if (adev->ip_blocks[i].status.hang &&
4550 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4551 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4561 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4563 * @adev: amdgpu_device pointer
4565 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4566 * reset is necessary to recover.
4567 * Returns true if a full asic reset is required, false if not.
4569 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4573 if (amdgpu_asic_need_full_reset(adev))
4576 for (i = 0; i < adev->num_ip_blocks; i++) {
4577 if (!adev->ip_blocks[i].status.valid)
4579 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4580 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4581 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4582 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4583 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4584 if (adev->ip_blocks[i].status.hang) {
4585				dev_info(adev->dev, "Some blocks need a full reset!\n");
4594 * amdgpu_device_ip_soft_reset - do a soft reset
4596 * @adev: amdgpu_device pointer
4598 * The list of all the hardware IPs that make up the asic is walked and the
4599 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4600 * IP specific hardware or software state changes that are necessary to soft reset the IP.
4602 * Returns 0 on success, negative error code on failure.
4604 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4608 for (i = 0; i < adev->num_ip_blocks; i++) {
4609 if (!adev->ip_blocks[i].status.valid)
4611 if (adev->ip_blocks[i].status.hang &&
4612 adev->ip_blocks[i].version->funcs->soft_reset) {
4613 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4623 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4625 * @adev: amdgpu_device pointer
4627 * The list of all the hardware IPs that make up the asic is walked and the
4628 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4629 * handles any IP specific hardware or software state changes that are
4630 * necessary after the IP has been soft reset.
4631 * Returns 0 on success, negative error code on failure.
4633 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4637 for (i = 0; i < adev->num_ip_blocks; i++) {
4638 if (!adev->ip_blocks[i].status.valid)
4640 if (adev->ip_blocks[i].status.hang &&
4641 adev->ip_blocks[i].version->funcs->post_soft_reset)
4642 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4651 * amdgpu_device_recover_vram - Recover some VRAM contents
4653 * @adev: amdgpu_device pointer
4655 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4656 * restore things like GPUVM page tables after a GPU reset where
4657 * the contents of VRAM might be lost.
4660 * 0 on success, negative error code on failure.
4662 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4664 struct dma_fence *fence = NULL, *next = NULL;
4665 struct amdgpu_bo *shadow;
4666 struct amdgpu_bo_vm *vmbo;
4669 if (amdgpu_sriov_runtime(adev))
4670 tmo = msecs_to_jiffies(8000);
4672 tmo = msecs_to_jiffies(100);
4674 dev_info(adev->dev, "recover vram bo from shadow start\n");
4675 mutex_lock(&adev->shadow_list_lock);
4676 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4677 /* If vm is compute context or adev is APU, shadow will be NULL */
4680 shadow = vmbo->shadow;
4682 /* No need to recover an evicted BO */
4683 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4684 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4685 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4688 r = amdgpu_bo_restore_shadow(shadow, &next);
4693 tmo = dma_fence_wait_timeout(fence, false, tmo);
4694 dma_fence_put(fence);
4699 } else if (tmo < 0) {
4707 mutex_unlock(&adev->shadow_list_lock);
4710 tmo = dma_fence_wait_timeout(fence, false, tmo);
4711 dma_fence_put(fence);
4713 if (r < 0 || tmo <= 0) {
4714 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4718 dev_info(adev->dev, "recover vram bo from shadow done\n");
4724 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4726 * @adev: amdgpu_device pointer
4727 * @from_hypervisor: request from hypervisor
4729 * Do VF FLR and reinitialize the ASIC.
4730 * Returns 0 on success, or a negative error code on failure.
4732 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4733 bool from_hypervisor)
4736 struct amdgpu_hive_info *hive = NULL;
4737 int retry_limit = 0;
4740 amdgpu_amdkfd_pre_reset(adev);
4742 if (from_hypervisor)
4743 r = amdgpu_virt_request_full_gpu(adev, true);
4745 r = amdgpu_virt_reset_gpu(adev);
4749 /* Resume IP prior to SMC */
4750 r = amdgpu_device_ip_reinit_early_sriov(adev);
4754 amdgpu_virt_init_data_exchange(adev);
4756 r = amdgpu_device_fw_loading(adev);
4760 /* now we are okay to resume SMC/CP/SDMA */
4761 r = amdgpu_device_ip_reinit_late_sriov(adev);
4765 hive = amdgpu_get_xgmi_hive(adev);
4766 /* Update PSP FW topology after reset */
4767 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4768 r = amdgpu_xgmi_update_topology(hive, adev);
4771 amdgpu_put_xgmi_hive(hive);
4774 amdgpu_irq_gpu_reset_resume_helper(adev);
4775 r = amdgpu_ib_ring_tests(adev);
4777 amdgpu_amdkfd_post_reset(adev);
4781 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4782 amdgpu_inc_vram_lost(adev);
4783 r = amdgpu_device_recover_vram(adev);
4785 amdgpu_virt_release_full_gpu(adev, true);
4787 if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4788 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4792 DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4799 * amdgpu_device_has_job_running - check if there is any job in mirror list
4801 * @adev: amdgpu_device pointer
4803 * Check whether there is any job in the pending (mirror) list.
4805 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4808 struct drm_sched_job *job;
4810 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4811 struct amdgpu_ring *ring = adev->rings[i];
4813 if (!ring || !ring->sched.thread)
4816 spin_lock(&ring->sched.job_list_lock);
4817 job = list_first_entry_or_null(&ring->sched.pending_list,
4818 struct drm_sched_job, list);
4819 spin_unlock(&ring->sched.job_list_lock);
4827 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4829 * @adev: amdgpu_device pointer
4831 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover the asic. Returns true if so, false if not.
4834 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4837 if (amdgpu_gpu_recovery == 0)
4840 /* Skip soft reset check in fatal error mode */
4841 if (!amdgpu_ras_is_poison_mode_supported(adev))
4844 if (amdgpu_sriov_vf(adev))
4847 if (amdgpu_gpu_recovery == -1) {
4848 switch (adev->asic_type) {
4849 #ifdef CONFIG_DRM_AMDGPU_SI
4856 #ifdef CONFIG_DRM_AMDGPU_CIK
4863 case CHIP_CYAN_SKILLFISH:
4873 dev_info(adev->dev, "GPU recovery disabled.\n");
4877 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4882 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4884 dev_info(adev->dev, "GPU mode1 reset\n");
4887 pci_clear_master(adev->pdev);
4889 amdgpu_device_cache_pci_state(adev->pdev);
4891 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4892 dev_info(adev->dev, "GPU smu mode1 reset\n");
4893 ret = amdgpu_dpm_mode1_reset(adev);
4895 dev_info(adev->dev, "GPU psp mode1 reset\n");
4896 ret = psp_gpu_reset(adev);
4900 dev_err(adev->dev, "GPU mode1 reset failed\n");
4902 amdgpu_device_load_pci_state(adev->pdev);
4904 /* wait for asic to come out of reset */
4905 for (i = 0; i < adev->usec_timeout; i++) {
4906 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4908 if (memsize != 0xffffffff)
4913 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4917 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4918 struct amdgpu_reset_context *reset_context)
4921 struct amdgpu_job *job = NULL;
4922 bool need_full_reset =
4923 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4925 if (reset_context->reset_req_dev == adev)
4926 job = reset_context->job;
4928 if (amdgpu_sriov_vf(adev)) {
4929 /* stop the data exchange thread */
4930 amdgpu_virt_fini_data_exchange(adev);
4933 amdgpu_fence_driver_isr_toggle(adev, true);
4935 /* block all schedulers and reset given job's ring */
4936 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4937 struct amdgpu_ring *ring = adev->rings[i];
4939 if (!ring || !ring->sched.thread)
4942		/* Clear job fences from the fence driver to avoid force_completion on them;
4943		 * leave the NULL and vm flush fences in the fence driver */
4944 amdgpu_fence_driver_clear_job_fences(ring);
4946 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4947 amdgpu_fence_driver_force_completion(ring);
4950 amdgpu_fence_driver_isr_toggle(adev, false);
4953 drm_sched_increase_karma(&job->base);
4955 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4956 /* If reset handler not implemented, continue; otherwise return */
4962 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4963 if (!amdgpu_sriov_vf(adev)) {
4965 if (!need_full_reset)
4966 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4968 if (!need_full_reset && amdgpu_gpu_recovery &&
4969 amdgpu_device_ip_check_soft_reset(adev)) {
4970 amdgpu_device_ip_pre_soft_reset(adev);
4971 r = amdgpu_device_ip_soft_reset(adev);
4972 amdgpu_device_ip_post_soft_reset(adev);
4973 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4974 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4975 need_full_reset = true;
4979 if (need_full_reset)
4980 r = amdgpu_device_ip_suspend(adev);
4981 if (need_full_reset)
4982 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4984 clear_bit(AMDGPU_NEED_FULL_RESET,
4985 &reset_context->flags);
4991 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4995 lockdep_assert_held(&adev->reset_domain->sem);
4997 for (i = 0; i < adev->num_regs; i++) {
4998 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4999 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
5000 adev->reset_dump_reg_value[i]);
5006 #ifdef CONFIG_DEV_COREDUMP
5007 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
5008 size_t count, void *data, size_t datalen)
5010 struct drm_printer p;
5011 struct amdgpu_device *adev = data;
5012 struct drm_print_iterator iter;
5017 iter.start = offset;
5018 iter.remain = count;
5020 p = drm_coredump_printer(&iter);
5022 drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
5023 drm_printf(&p, "kernel: " UTS_RELEASE "\n");
5024 drm_printf(&p, "module: " KBUILD_MODNAME "\n");
5025 drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
5026 if (adev->reset_task_info.pid)
5027 drm_printf(&p, "process_name: %s PID: %d\n",
5028 adev->reset_task_info.process_name,
5029 adev->reset_task_info.pid);
5031 if (adev->reset_vram_lost)
5032 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
5033 if (adev->num_regs) {
5034 drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");
5036 for (i = 0; i < adev->num_regs; i++)
5037 drm_printf(&p, "0x%08x: 0x%08x\n",
5038 adev->reset_dump_reg_list[i],
5039 adev->reset_dump_reg_value[i]);
5042 return count - iter.remain;
5045 static void amdgpu_devcoredump_free(void *data)
5049 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
5051 struct drm_device *dev = adev_to_drm(adev);
5053 ktime_get_ts64(&adev->reset_time);
5054 dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
5055 amdgpu_devcoredump_read, amdgpu_devcoredump_free);
5059 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5060 struct amdgpu_reset_context *reset_context)
5062 struct amdgpu_device *tmp_adev = NULL;
5063 bool need_full_reset, skip_hw_reset, vram_lost = false;
5065	bool gpu_reset_for_dev_remove = false;
5067 /* Try reset handler method first */
5068 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5070 amdgpu_reset_reg_dumps(tmp_adev);
5072 reset_context->reset_device_list = device_list_handle;
5073 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
5074 /* If reset handler not implemented, continue; otherwise return */
5080 /* Reset handler not implemented, use the default method */
5082 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5083 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
5085 gpu_reset_for_dev_remove =
5086 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5087 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5090	 * ASIC reset has to be done on all XGMI hive nodes ASAP
5091	 * to allow proper link negotiation in FW (within 1 sec)
5093 if (!skip_hw_reset && need_full_reset) {
5094 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5095 /* For XGMI run all resets in parallel to speed up the process */
5096 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5097 tmp_adev->gmc.xgmi.pending_reset = false;
5098 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
5101 r = amdgpu_asic_reset(tmp_adev);
5104 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
5105 r, adev_to_drm(tmp_adev)->unique);
5110		/* For XGMI wait for all resets to complete before proceeding */
5112 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5113 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5114 flush_work(&tmp_adev->xgmi_reset_work);
5115 r = tmp_adev->asic_reset_res;
5123 if (!r && amdgpu_ras_intr_triggered()) {
5124 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5125 if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
5126 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
5127 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
5130 amdgpu_ras_intr_cleared();
5133 /* Since the mode1 reset affects base ip blocks, the
5134 * phase1 ip blocks need to be resumed. Otherwise there
5135 * will be a BIOS signature error and the psp bootloader
5136 * can't load kdb on the next amdgpu install.
5138 if (gpu_reset_for_dev_remove) {
5139 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
5140 amdgpu_device_ip_resume_phase1(tmp_adev);
5145 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5146 if (need_full_reset) {
5148 r = amdgpu_device_asic_init(tmp_adev);
5150 dev_warn(tmp_adev->dev, "asic atom init failed!");
5152 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5153 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
5157 r = amdgpu_device_ip_resume_phase1(tmp_adev);
5161 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5162 #ifdef CONFIG_DEV_COREDUMP
5163 tmp_adev->reset_vram_lost = vram_lost;
5164 memset(&tmp_adev->reset_task_info, 0,
5165 sizeof(tmp_adev->reset_task_info));
5166 if (reset_context->job && reset_context->job->vm)
5167 tmp_adev->reset_task_info =
5168 reset_context->job->vm->task_info;
5169 amdgpu_reset_capture_coredumpm(tmp_adev);
5172 DRM_INFO("VRAM is lost due to GPU reset!\n");
5173 amdgpu_inc_vram_lost(tmp_adev);
5176 r = amdgpu_device_fw_loading(tmp_adev);
5180 r = amdgpu_device_ip_resume_phase2(tmp_adev);
5185 amdgpu_device_fill_reset_magic(tmp_adev);
5188		 * Add this ASIC back as tracked, as the reset already
5189		 * completed successfully.
5191 amdgpu_register_gpu_instance(tmp_adev);
5193 if (!reset_context->hive &&
5194 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5195 amdgpu_xgmi_add_device(tmp_adev);
5197 r = amdgpu_device_ip_late_init(tmp_adev);
5201 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
5204			 * The GPU enters a bad state once the number of
5205			 * faulty pages detected by ECC reaches the threshold,
5206			 * and ras recovery is scheduled next. So add a check
5207			 * here to break recovery if it indeed exceeds the
5208			 * bad page threshold, and remind the user to
5209			 * retire this GPU or set a bigger
5210			 * bad_page_threshold value to fix this when
5211			 * probing the driver again.
5213 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
5215 amdgpu_ras_resume(tmp_adev);
5221 /* Update PSP FW topology after reset */
5222 if (reset_context->hive &&
5223 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5224 r = amdgpu_xgmi_update_topology(
5225 reset_context->hive, tmp_adev);
5231 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5232 r = amdgpu_ib_ring_tests(tmp_adev);
5234 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5235 need_full_reset = true;
5242 r = amdgpu_device_recover_vram(tmp_adev);
5244 tmp_adev->asic_reset_res = r;
5248 if (need_full_reset)
5249 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5251 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5255 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5258 switch (amdgpu_asic_reset_method(adev)) {
5259 case AMD_RESET_METHOD_MODE1:
5260 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5262 case AMD_RESET_METHOD_MODE2:
5263 adev->mp1_state = PP_MP1_STATE_RESET;
5266 adev->mp1_state = PP_MP1_STATE_NONE;
5271 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5273 amdgpu_vf_error_trans_all(adev);
5274 adev->mp1_state = PP_MP1_STATE_NONE;
5277 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5279 struct pci_dev *p = NULL;
5281 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5282 adev->pdev->bus->number, 1);
5284 pm_runtime_enable(&(p->dev));
5285 pm_runtime_resume(&(p->dev));
5291 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5293 enum amd_reset_method reset_method;
5294 struct pci_dev *p = NULL;
5298	 * For now, only BACO and mode1 reset are confirmed
5299	 * to suffer from the audio issue if not properly suspended.
5301 reset_method = amdgpu_asic_reset_method(adev);
5302 if ((reset_method != AMD_RESET_METHOD_BACO) &&
5303 (reset_method != AMD_RESET_METHOD_MODE1))
5306 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5307 adev->pdev->bus->number, 1);
5311 expires = pm_runtime_autosuspend_expiration(&(p->dev));
5314		 * If we cannot get the audio device autosuspend delay,
5315		 * a fixed 4s interval is used. Since 3s is the audio
5316		 * controller's default autosuspend delay setting, the
5317		 * 4s used here is guaranteed to cover it.
5319 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5321 while (!pm_runtime_status_suspended(&(p->dev))) {
5322 if (!pm_runtime_suspend(&(p->dev)))
5325 if (expires < ktime_get_mono_fast_ns()) {
5326 dev_warn(adev->dev, "failed to suspend display audio\n");
5328 /* TODO: abort the succeeding gpu reset? */
5333 pm_runtime_disable(&(p->dev));
5339 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5341 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5343 #if defined(CONFIG_DEBUG_FS)
5344 if (!amdgpu_sriov_vf(adev))
5345 cancel_work(&adev->reset_work);
5349 cancel_work(&adev->kfd.reset_work);
5351 if (amdgpu_sriov_vf(adev))
5352 cancel_work(&adev->virt.flr_work);
5354 if (con && adev->ras_enabled)
5355 cancel_work(&con->recovery_work);
5360 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5362 * @adev: amdgpu_device pointer
5363 * @job: which job trigger hang
5364 * @reset_context: amdgpu reset context pointer
5366 * Attempt to reset the GPU if it has hung (all asics).
5367 * Attempt to do a soft reset or full reset and reinitialize the ASIC.
5368 * Returns 0 for success or an error on failure.
5371 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5372 struct amdgpu_job *job,
5373 struct amdgpu_reset_context *reset_context)
5375 struct list_head device_list, *device_list_handle = NULL;
5376 bool job_signaled = false;
5377 struct amdgpu_hive_info *hive = NULL;
5378 struct amdgpu_device *tmp_adev = NULL;
5380 bool need_emergency_restart = false;
5381 bool audio_suspended = false;
5382 bool gpu_reset_for_dev_remove = false;
5384 gpu_reset_for_dev_remove =
5385 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5386 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5389 * Special case: RAS triggered and full reset isn't supported
5391 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5394	 * Flush RAM to disk so that after reboot
5395	 * the user can read the log and see why the system rebooted.
5397 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5398 DRM_WARN("Emergency reboot.");
5401 emergency_restart();
5404 dev_info(adev->dev, "GPU %s begin!\n",
5405		need_emergency_restart ? "jobs stop" : "reset");
5407 if (!amdgpu_sriov_vf(adev))
5408 hive = amdgpu_get_xgmi_hive(adev);
5410 mutex_lock(&hive->hive_lock);
5412 reset_context->job = job;
5413 reset_context->hive = hive;
5415 * Build list of devices to reset.
5416 * In case we are in XGMI hive mode, resort the device list
5417 * to put adev in the 1st position.
5419 INIT_LIST_HEAD(&device_list);
5420 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5421 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5422 list_add_tail(&tmp_adev->reset_list, &device_list);
5423 if (gpu_reset_for_dev_remove && adev->shutdown)
5424 tmp_adev->shutdown = true;
5426 if (!list_is_first(&adev->reset_list, &device_list))
5427 list_rotate_to_front(&adev->reset_list, &device_list);
5428 device_list_handle = &device_list;
5430 list_add_tail(&adev->reset_list, &device_list);
5431 device_list_handle = &device_list;
5434 /* We need to lock reset domain only once both for XGMI and single device */
5435 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5437 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5439 /* block all schedulers and reset given job's ring */
5440 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5442 amdgpu_device_set_mp1_state(tmp_adev);
5445		 * Try to put the audio codec into suspend state
5446		 * before the gpu reset starts.
5448		 * The power domain of the graphics device is
5449		 * shared with the AZ power domain. Without this,
5450		 * we may change the audio hardware behind
5451		 * the audio driver's back, which would trigger
5452		 * audio codec errors.
5454 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5455 audio_suspended = true;
5457 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5459 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5461 if (!amdgpu_sriov_vf(tmp_adev))
5462 amdgpu_amdkfd_pre_reset(tmp_adev);
5465		 * Mark these ASICs to be reset as untracked first,
5466		 * and add them back after the reset completes
5468 amdgpu_unregister_gpu_instance(tmp_adev);
5470 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5472 /* disable ras on ALL IPs */
5473 if (!need_emergency_restart &&
5474 amdgpu_device_ip_need_full_reset(tmp_adev))
5475 amdgpu_ras_suspend(tmp_adev);
5477 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5478 struct amdgpu_ring *ring = tmp_adev->rings[i];
5480 if (!ring || !ring->sched.thread)
5483 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5485 if (need_emergency_restart)
5486 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5488 atomic_inc(&tmp_adev->gpu_reset_counter);
5491 if (need_emergency_restart)
5492 goto skip_sched_resume;
5495	 * Must check the guilty signal here, since after this point all old
5496	 * HW fences are force signaled.
5498	 * job->base holds a reference to the parent fence */
5500 if (job && dma_fence_is_signaled(&job->hw_fence)) {
5501 job_signaled = true;
5502 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5506 retry: /* Rest of adevs pre asic reset from XGMI hive. */
5507 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5508 if (gpu_reset_for_dev_remove) {
5509			/* Workaround for ASICs that need to disable SMC first */
5510 amdgpu_device_smu_fini_early(tmp_adev);
5512 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5513		/* TODO: Should we stop? */
5515 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5516 r, adev_to_drm(tmp_adev)->unique);
5517 tmp_adev->asic_reset_res = r;
5521		 * Drop all pending non-scheduler resets. Scheduler resets
5522		 * were already dropped during drm_sched_stop */
5524 amdgpu_device_stop_pending_resets(tmp_adev);
5527	/* Actual ASIC resets if needed. */
5528 /* Host driver will handle XGMI hive reset for SRIOV */
5529 if (amdgpu_sriov_vf(adev)) {
5530 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5532 adev->asic_reset_res = r;
5534		/* Aldebaran and gfx_11_0_3 support ras in SRIOV, so RAS needs to be resumed during reset */
5535 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
5536 adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3))
5537 amdgpu_ras_resume(adev);
5539 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5540 if (r && r == -EAGAIN)
5543 if (!r && gpu_reset_for_dev_remove)
5549	/* Post ASIC reset for all devs. */
5550 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5552 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5553 struct amdgpu_ring *ring = tmp_adev->rings[i];
5555 if (!ring || !ring->sched.thread)
5558 drm_sched_start(&ring->sched, true);
5561 if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
5562 amdgpu_mes_self_test(tmp_adev);
5564 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5565 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5568 if (tmp_adev->asic_reset_res)
5569 r = tmp_adev->asic_reset_res;
5571 tmp_adev->asic_reset_res = 0;
5574			/* Bad news: how do we tell userspace? */
5575 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5576 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5578 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5579 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5580 DRM_WARN("smart shift update failed\n");
5585 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5586 /* unlock kfd: SRIOV would do it separately */
5587 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5588 amdgpu_amdkfd_post_reset(tmp_adev);
5590		/* kfd_post_reset will do nothing if the kfd device is not initialized;
5591		 * bring it up here if it was not initialized before */
5593 if (!adev->kfd.init_complete)
5594 amdgpu_amdkfd_device_init(adev);
5596 if (audio_suspended)
5597 amdgpu_device_resume_display_audio(tmp_adev);
5599 amdgpu_device_unset_mp1_state(tmp_adev);
5601 amdgpu_ras_set_error_query_ready(tmp_adev, true);
5605 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5607 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5610 mutex_unlock(&hive->hive_lock);
5611 amdgpu_put_xgmi_hive(hive);
5615 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5617 atomic_set(&adev->reset_domain->reset_res, r);
5622 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5624 * @adev: amdgpu_device pointer
5626 * Fetches and stores in the driver the PCIE capabilities (gen speed
5627 * and lanes) of the slot the device is in. Handles APUs and
5628 * virtualized environments where PCIE config space may not be available.
5630 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5632 struct pci_dev *pdev;
5633 enum pci_bus_speed speed_cap, platform_speed_cap;
5634 enum pcie_link_width platform_link_width;
5636 if (amdgpu_pcie_gen_cap)
5637 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5639 if (amdgpu_pcie_lane_cap)
5640 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5642 /* covers APUs as well */
5643 if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
5644 if (adev->pm.pcie_gen_mask == 0)
5645 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5646 if (adev->pm.pcie_mlw_mask == 0)
5647 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5651 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5654 pcie_bandwidth_available(adev->pdev, NULL,
5655 &platform_speed_cap, &platform_link_width);
5657 if (adev->pm.pcie_gen_mask == 0) {
5660 speed_cap = pcie_get_speed_cap(pdev);
5661 if (speed_cap == PCI_SPEED_UNKNOWN) {
5662 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5663 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5664 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5666 if (speed_cap == PCIE_SPEED_32_0GT)
5667 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5668 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5669 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5670 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5671 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5672 else if (speed_cap == PCIE_SPEED_16_0GT)
5673 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5674 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5675 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5676 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5677 else if (speed_cap == PCIE_SPEED_8_0GT)
5678 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5679 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5680 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5681 else if (speed_cap == PCIE_SPEED_5_0GT)
5682 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5683 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5685 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5688 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5689 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5690 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5692 if (platform_speed_cap == PCIE_SPEED_32_0GT)
5693 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5694 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5695 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5696 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5697 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5698 else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5699 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5700 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5701 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5702 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5703 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5704 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5705 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5706 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5707 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5708 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5709 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5711 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5715 if (adev->pm.pcie_mlw_mask == 0) {
5716 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5717 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5719 switch (platform_link_width) {
5721 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5722 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5723 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5724 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5725 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5726 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5727 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5730 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5731 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5732 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5733 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5734 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5735 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5738 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5739 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5740 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5741 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5742 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5745 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5746 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5747 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5748 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5751 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5752 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5753 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5756 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5757 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5760 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5770 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5772 * @adev: amdgpu_device pointer
5773 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5775 * Return true if @peer_adev can access (DMA) @adev through the PCIe
5776 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of the peer device.
5779 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5780 struct amdgpu_device *peer_adev)
5782 #ifdef CONFIG_HSA_AMD_P2P
5783 uint64_t address_mask = peer_adev->dev->dma_mask ?
5784 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5785 resource_size_t aper_limit =
5786 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5788 !adev->gmc.xgmi.connected_to_cpu &&
5789 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5791 return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5792 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5793 !(adev->gmc.aper_base & address_mask ||
5794 aper_limit & address_mask));
5800 int amdgpu_device_baco_enter(struct drm_device *dev)
5802 struct amdgpu_device *adev = drm_to_adev(dev);
5803 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5805 if (!amdgpu_device_supports_baco(dev))
5808 if (ras && adev->ras_enabled &&
5809 adev->nbio.funcs->enable_doorbell_interrupt)
5810 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5812 return amdgpu_dpm_baco_enter(adev);
5815 int amdgpu_device_baco_exit(struct drm_device *dev)
5817 struct amdgpu_device *adev = drm_to_adev(dev);
5818 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5821 if (!amdgpu_device_supports_baco(dev))
5824 ret = amdgpu_dpm_baco_exit(adev);
5828 if (ras && adev->ras_enabled &&
5829 adev->nbio.funcs->enable_doorbell_interrupt)
5830 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5832 if (amdgpu_passthrough(adev) &&
5833 adev->nbio.funcs->clear_doorbell_interrupt)
5834 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5840 * amdgpu_pci_error_detected - Called when a PCI error is detected.
5841 * @pdev: PCI device struct
5842 * @state: PCI channel state
5844 * Description: Called when a PCI error is detected.
5846 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5848 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5850 struct drm_device *dev = pci_get_drvdata(pdev);
5851 struct amdgpu_device *adev = drm_to_adev(dev);
5854 DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5856 if (adev->gmc.xgmi.num_physical_nodes > 1) {
5857 DRM_WARN("No support for XGMI hive yet...");
5858 return PCI_ERS_RESULT_DISCONNECT;
5861 adev->pci_channel_state = state;
5864 case pci_channel_io_normal:
5865 return PCI_ERS_RESULT_CAN_RECOVER;
5866 /* Fatal error, prepare for slot reset */
5867 case pci_channel_io_frozen:
5869 * Locking adev->reset_domain->sem will prevent any external access
5870 * to GPU during PCI error recovery
5872 amdgpu_device_lock_reset_domain(adev->reset_domain);
5873 amdgpu_device_set_mp1_state(adev);
5876 * Block any work scheduling as we do for regular GPU reset
5877 * for the duration of the recovery
5879 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5880 struct amdgpu_ring *ring = adev->rings[i];
5882 if (!ring || !ring->sched.thread)
5885 drm_sched_stop(&ring->sched, NULL);
5887 atomic_inc(&adev->gpu_reset_counter);
5888 return PCI_ERS_RESULT_NEED_RESET;
5889 case pci_channel_io_perm_failure:
5890 /* Permanent error, prepare for device removal */
5891 return PCI_ERS_RESULT_DISCONNECT;
5894 return PCI_ERS_RESULT_NEED_RESET;
5898 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5899 * @pdev: pointer to PCI device
5901 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5904 DRM_INFO("PCI error: mmio enabled callback!!\n");
5906 /* TODO - dump whatever for debugging purposes */
5908	/* This is called only if amdgpu_pci_error_detected returns
5909	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5910	 * works, so there is no need to reset the slot.
5913 return PCI_ERS_RESULT_RECOVERED;
5917 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5918 * @pdev: PCI device struct
5920 * Description: This routine is called by the pci error recovery
5921 * code after the PCI slot has been reset, just before we
5922 * should resume normal operations.
5924 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5926 struct drm_device *dev = pci_get_drvdata(pdev);
5927 struct amdgpu_device *adev = drm_to_adev(dev);
5929 struct amdgpu_reset_context reset_context;
5931 struct list_head device_list;
5933 DRM_INFO("PCI error: slot reset callback!!\n");
5935 memset(&reset_context, 0, sizeof(reset_context));
5937 INIT_LIST_HEAD(&device_list);
5938 list_add_tail(&adev->reset_list, &device_list);
5940 /* wait for asic to come out of reset */
5943	/* Restore PCI config space */
5944 amdgpu_device_load_pci_state(pdev);
5946 /* confirm ASIC came out of reset */
5947 for (i = 0; i < adev->usec_timeout; i++) {
5948 memsize = amdgpu_asic_get_config_memsize(adev);
5950 if (memsize != 0xffffffff)
5954 if (memsize == 0xffffffff) {
5959 reset_context.method = AMD_RESET_METHOD_NONE;
5960 reset_context.reset_req_dev = adev;
5961 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5962 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5964 adev->no_hw_access = true;
5965 r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5966 adev->no_hw_access = false;
5970 r = amdgpu_do_asic_reset(&device_list, &reset_context);
5974 if (amdgpu_device_cache_pci_state(adev->pdev))
5975 pci_restore_state(adev->pdev);
5977 DRM_INFO("PCIe error recovery succeeded\n");
5979 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5980 amdgpu_device_unset_mp1_state(adev);
5981 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5984 return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5988 * amdgpu_pci_resume() - resume normal ops after PCI reset
5989 * @pdev: pointer to PCI device
5991 * Called when the error recovery driver tells us that it's
5992 * OK to resume normal operation.
5994 void amdgpu_pci_resume(struct pci_dev *pdev)
5996 struct drm_device *dev = pci_get_drvdata(pdev);
5997 struct amdgpu_device *adev = drm_to_adev(dev);
6001 DRM_INFO("PCI error: resume callback!!\n");
6003 /* Only continue execution for the case of pci_channel_io_frozen */
6004 if (adev->pci_channel_state != pci_channel_io_frozen)
6007 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6008 struct amdgpu_ring *ring = adev->rings[i];
6010 if (!ring || !ring->sched.thread)
6013 drm_sched_start(&ring->sched, true);
6016 amdgpu_device_unset_mp1_state(adev);
6017 amdgpu_device_unlock_reset_domain(adev->reset_domain);
6020 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
6022 struct drm_device *dev = pci_get_drvdata(pdev);
6023 struct amdgpu_device *adev = drm_to_adev(dev);
6026 r = pci_save_state(pdev);
6028 kfree(adev->pci_state);
6030 adev->pci_state = pci_store_saved_state(pdev);
6032 if (!adev->pci_state) {
6033 DRM_ERROR("Failed to store PCI saved state");
6037 DRM_WARN("Failed to save PCI state, err:%d\n", r);
6044 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
6046 struct drm_device *dev = pci_get_drvdata(pdev);
6047 struct amdgpu_device *adev = drm_to_adev(dev);
6050 if (!adev->pci_state)
6053 r = pci_load_saved_state(pdev, adev->pci_state);
6056 pci_restore_state(pdev);
6058 DRM_WARN("Failed to load PCI state, err:%d\n", r);
6065 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6066 struct amdgpu_ring *ring)
6068 #ifdef CONFIG_X86_64
6069 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6072 if (adev->gmc.xgmi.connected_to_cpu)
6075 if (ring && ring->funcs->emit_hdp_flush)
6076 amdgpu_ring_emit_hdp_flush(ring);
6078 amdgpu_asic_flush_hdp(adev, ring);
6081 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6082 struct amdgpu_ring *ring)
6084 #ifdef CONFIG_X86_64
6085 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6088 if (adev->gmc.xgmi.connected_to_cpu)
6091 amdgpu_asic_invalidate_hdp(adev, ring);
6094 int amdgpu_in_reset(struct amdgpu_device *adev)
6096 return atomic_read(&adev->reset_domain->in_gpu_reset);
6100 * amdgpu_device_halt() - bring hardware to some kind of halt state
6102 * @adev: amdgpu_device pointer
6104 * Bring hardware to some kind of halt state so that no one can touch it
6105 * any more. It helps preserve the error context when an error occurs.
6106 * Compared to a simple hang, the system stays stable at least for SSH
6107 * access. Then it should be trivial to inspect the hardware state and
6108 * see what's going on. Implemented as follows:
6110 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc.),
6111 * clears all CPU mappings to the device, and disallows remappings through page faults
6112 * 2. amdgpu_irq_disable_all() disables all interrupts
6113 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
6114 * 4. set adev->no_hw_access to avoid potential crashes after step 5
6115 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
6116 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
6117 * flush any in flight DMA operations
6119 void amdgpu_device_halt(struct amdgpu_device *adev)
6121 struct pci_dev *pdev = adev->pdev;
6122 struct drm_device *ddev = adev_to_drm(adev);
6124 amdgpu_xcp_dev_unplug(adev);
6125 drm_dev_unplug(ddev);
6127 amdgpu_irq_disable_all(adev);
6129 amdgpu_fence_driver_hw_fini(adev);
6131 adev->no_hw_access = true;
6133 amdgpu_device_unmap_mmio(adev);
6135 pci_disable_device(pdev);
6136 pci_wait_for_pending_transaction(pdev);
6139 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
6142 unsigned long flags, address, data;
6145 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6146 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6148 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6149 WREG32(address, reg * 4);
6150 (void)RREG32(address);
6152 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6156 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6159 unsigned long flags, address, data;
6161 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6162 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6164 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6165 WREG32(address, reg * 4);
6166 (void)RREG32(address);
6169 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6173 * amdgpu_device_switch_gang - switch to a new gang
6174 * @adev: amdgpu_device pointer
6175 * @gang: the gang to switch to
6177 * Try to switch to a new gang.
6178 * Returns: NULL if we switched to the new gang, or a reference to the current gang leader.
6181 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6182 struct dma_fence *gang)
6184 struct dma_fence *old = NULL;
6189 old = dma_fence_get_rcu_safe(&adev->gang_submit);
6195 if (!dma_fence_is_signaled(old))
6198 } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6205 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6207 switch (adev->asic_type) {
6208 #ifdef CONFIG_DRM_AMDGPU_SI
6212 /* chips with no display hardware */
6214 #ifdef CONFIG_DRM_AMDGPU_SI
6220 #ifdef CONFIG_DRM_AMDGPU_CIK
6229 case CHIP_POLARIS10:
6230 case CHIP_POLARIS11:
6231 case CHIP_POLARIS12:
6235 /* chips with display hardware */
6239 if (!adev->ip_versions[DCE_HWIP][0] ||
6240 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6246 uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
6247 uint32_t inst, uint32_t reg_addr, char reg_name[],
6248 uint32_t expected_value, uint32_t mask)
6252 uint32_t tmp_ = RREG32(reg_addr);
6253 uint32_t loop = adev->usec_timeout;
6255 while ((tmp_ & (mask)) != (expected_value)) {
6257 loop = adev->usec_timeout;
6261 tmp_ = RREG32(reg_addr);
6264		DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
6265 inst, reg_name, (uint32_t)expected_value,
6266 (uint32_t)(tmp_ & (mask)));