 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 * Authors: Dave Airlie
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>
#include <linux/apple-gmux.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>
#include <drm/drm_drv.h>
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
#define AMDGPU_RESUME_MS		2000
#define AMDGPU_MAX_RETRY_LIMIT		2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
static const struct drm_driver amdgpu_kms_driver;
const char *amdgpu_asic_name[] = {
 * DOC: pcie_replay_count
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and the NAKs received.
static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
	return sysfs_emit(buf, "%llu\n", cnt);
static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
		amdgpu_device_get_pcie_replay_count, NULL);
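/*
 * Example (illustrative sketch, not driver code): reading this attribute
 * from userspace. The card0 path is an assumption; the file lives under
 * the device's sysfs directory.
 *
 *	FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");
 *	unsigned long long replays;
 *
 *	if (f && fscanf(f, "%llu", &replays) == 1)
 *		printf("PCIe replays: %llu\n", replays);
 *	if (f)
 *		fclose(f);
 */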
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 * The amdgpu driver provides a sysfs API for reporting the product name.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
static ssize_t amdgpu_device_get_product_name(struct device *dev,
		struct device_attribute *attr, char *buf)
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	return sysfs_emit(buf, "%s\n", adev->product_name);
static DEVICE_ATTR(product_name, S_IRUGO,
		amdgpu_device_get_product_name, NULL);
 * DOC: product_number
 * The amdgpu driver provides a sysfs API for reporting the part number.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
static ssize_t amdgpu_device_get_product_number(struct device *dev,
		struct device_attribute *attr, char *buf)
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	return sysfs_emit(buf, "%s\n", adev->product_number);
static DEVICE_ATTR(product_number, S_IRUGO,
		amdgpu_device_get_product_number, NULL);
 * The amdgpu driver provides a sysfs API for reporting the serial number.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
static ssize_t amdgpu_device_get_serial_number(struct device *dev,
		struct device_attribute *attr, char *buf)
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	return sysfs_emit(buf, "%s\n", adev->serial);
static DEVICE_ATTR(serial_number, S_IRUGO,
		amdgpu_device_get_serial_number, NULL);
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 * @dev: drm_device pointer
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
bool amdgpu_device_supports_px(struct drm_device *dev)
	struct amdgpu_device *adev = drm_to_adev(dev);
	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 * @dev: drm_device pointer
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
bool amdgpu_device_supports_boco(struct drm_device *dev)
	struct amdgpu_device *adev = drm_to_adev(dev);
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
 * amdgpu_device_supports_baco - Does the device support BACO
 * @dev: drm_device pointer
 * Returns true if the device supports BACO,
 * otherwise returns false.
bool amdgpu_device_supports_baco(struct drm_device *dev)
	struct amdgpu_device *adev = drm_to_adev(dev);
	return amdgpu_asic_supports_baco(adev);
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * smart shift support
 * @dev: drm_device pointer
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
	return (amdgpu_device_supports_boco(dev) &&
		amdgpu_acpi_is_power_shift_control_supported());
 * VRAM access helper functions
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	if (!drm_dev_enter(adev_to_drm(adev), &idx))
	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
		WREG32_NO_KIQ(mmMM_DATA, *data++);
		*data++ = RREG32_NO_KIQ(mmMM_DATA);
	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 * amdgpu_device_aper_access - access vram via the vram aperture
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 * Returns the number of bytes that have been transferred.
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
	if (!adev->mman.aper_base_kaddr)
	last = min(pos + size, adev->gmc.visible_vram_size);
	addr = adev->mman.aper_base_kaddr + pos;
		memcpy_toio(addr, buf, count);
		amdgpu_device_flush_hdp(adev, NULL);
		amdgpu_device_invalidate_hdp(adev, NULL);
		memcpy_fromio(buf, addr, count);
 * amdgpu_device_vram_access - read/write a buffer in vram
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
	/* try using the vram aperture to access vram first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	/* use MM to access the rest of vram */
	amdgpu_device_mm_access(adev, pos, buf, size, write);
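/*
 * Example (hedged sketch): a caller reading one dword at a VRAM offset via
 * this helper. The 0x1000 offset is arbitrary; pos and size keep the 4-byte
 * alignment that the MM fallback path asserts.
 *
 *	uint32_t val;
 *
 *	amdgpu_device_vram_access(adev, 0x1000, &val, sizeof(val), false);
 */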
 * register access helper functions.
/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
	if (adev->no_hw_access)
#ifdef CONFIG_LOCKDEP
	 * This is a bit complicated to understand, so it is worth a comment. What we
	 * assert here is that the GPU reset is not running on another thread in parallel.
	 * For this we trylock the read side of the reset semaphore; if that succeeds
	 * we know that the reset is not running in parallel.
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock, or are the reset thread itself and hold the write side of
	 * the lock.
	if (down_read_trylock(&adev->reset_domain->sem))
		up_read(&adev->reset_domain->sem);
		lockdep_assert_held(&adev->reset_domain->sem);
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 * Returns the 32 bit value from the offset specified.
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
	if (amdgpu_device_skip_hw_access(adev))
	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg);
			up_read(&adev->reset_domain->sem);
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		ret = adev->pcie_rreg(adev, reg * 4);
	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * Returns the 8 bit value from the offset specified.
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
	if (amdgpu_device_skip_hw_access(adev))
	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 * Writes the value specified to the offset specified.
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
	if (amdgpu_device_skip_hw_access(adev))
	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 * Writes the value specified to the offset specified.
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
	if (amdgpu_device_skip_hw_access(adev))
	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v);
			up_read(&adev->reset_domain->sem);
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		adev->pcie_wreg(adev, reg * 4, v);
	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
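/*
 * Illustrative note: most driver code reaches these helpers through the
 * RREG32()/WREG32() macros rather than calling them directly. A typical
 * read-modify-write looks like the sketch below; mmEXAMPLE_REG is a
 * placeholder register name.
 *
 *	tmp = RREG32(mmEXAMPLE_REG);
 *	tmp |= 0x1;
 *	WREG32(mmEXAMPLE_REG, tmp);
 */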
 * amdgpu_mm_wreg_mmio_rlc - write a register either with direct/indirect mmio or with the RLC path if in range
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 * This function is invoked only for debugfs register access.
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v)
	if (amdgpu_device_skip_hw_access(adev))
	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
	} else if ((reg * 4) >= adev->rmmio_size) {
		adev->pcie_wreg(adev, reg * 4, v);
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 * amdgpu_mm_rdoorbell - read a doorbell dword
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
	if (amdgpu_device_skip_hw_access(adev))
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 * amdgpu_mm_wdoorbell - write a doorbell dword
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
	if (amdgpu_device_skip_hw_access(adev))
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
	if (amdgpu_device_skip_hw_access(adev))
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
	if (amdgpu_device_skip_hw_access(adev))
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 * amdgpu_device_indirect_rreg - read an indirect register
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 * Returns the value of indirect register @reg_addr
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 * Returns the value of indirect register @reg_addr
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 * amdgpu_device_indirect_wreg - write to an indirect register
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 reg_addr, u32 reg_data)
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 * amdgpu_device_indirect_wreg64 - write to a 64 bit indirect register
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 reg_addr, u64 reg_data)
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 * amdgpu_device_get_rev_id - query device rev_id
 * @adev: amdgpu_device pointer
 * Returns the device rev_id.
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
	return adev->nbio.funcs->get_rev_id(adev);
 * amdgpu_invalid_rreg - dummy reg read function
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
 * amdgpu_invalid_wreg - dummy reg write function
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
 * amdgpu_block_invalid_rreg - dummy reg read function
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
 * amdgpu_block_invalid_wreg - dummy reg write function
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t reg, uint32_t v)
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 * @adev: amdgpu_device pointer
 * Does any asic specific work and then calls atom asic init.
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
	amdgpu_asic_pre_asic_init(adev);
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
		return amdgpu_atomfirmware_asic_init(adev, true);
	return amdgpu_atom_asic_init(adev->mode_info.atom_context);
 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
 * @adev: amdgpu_device pointer
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &adev->mem_scratch.robj,
				       &adev->mem_scratch.gpu_addr,
				       (void **)&adev->mem_scratch.ptr);
 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
 * @adev: amdgpu_device pointer
 * Frees the VRAM scratch page.
static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
 * amdgpu_device_program_register_sequence - program an array of registers.
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
	u32 tmp, reg, and_mask, or_mask;
	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];
		if (and_mask == 0xffffffff) {
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
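/*
 * Example (illustrative): a golden-settings table consumed by this helper
 * is laid out as {reg, and_mask, or_mask} triplets. The register name below
 * is a placeholder.
 *
 *	static const u32 example_golden_settings[] = {
 *		mmEXAMPLE_REG, 0xffffffff, 0x00000100,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *						ARRAY_SIZE(example_golden_settings));
 */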
 * amdgpu_device_pci_config_reset - reset the GPU
 * @adev: amdgpu_device pointer
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 * @adev: amdgpu_device pointer
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
	return pci_reset_function(adev->pdev);
 * GPU doorbell aperture helper functions.
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 * @adev: amdgpu_device pointer
 * Init doorbell driver information (CIK).
 * Returns 0 on success, error on failure.
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
	amdgpu_asic_init_doorbell_index(adev);
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
	if (adev->enable_mes) {
		adev->doorbell.num_doorbells =
			adev->doorbell.size / sizeof(u32);
		adev->doorbell.num_doorbells =
			min_t(u32, adev->doorbell.size / sizeof(u32),
			      adev->doorbell_index.max_assignment + 1);
		if (adev->doorbell.num_doorbells == 0)
		/* For Vega, reserve and map two pages on the doorbell BAR since the SDMA
		 * paging queue doorbell uses the second page. The
		 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
		 * doorbells are in the first page. So with the paging queue enabled,
		 * the max num_doorbells should include one extra page (0x400 in dwords).
		if (adev->asic_type >= CHIP_VEGA10)
			adev->doorbell.num_doorbells += 0x400;
	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
	if (adev->doorbell.ptr == NULL)
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 * @adev: amdgpu_device pointer
 * Tear down doorbell driver information (CIK).
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 * @adev: amdgpu_device pointer
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 * @adev: amdgpu_device pointer
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
 * amdgpu_device_wb_get - Allocate a wb entry
 * @adev: amdgpu_device pointer
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
 * amdgpu_device_wb_free - Free a wb entry
 * @adev: amdgpu_device pointer
 * Free a wb slot allocated for use by the driver (all asics).
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
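/*
 * Example (hedged sketch): typical writeback slot usage. A caller allocates
 * a slot, derives the CPU and GPU addresses for it (the offset returned is
 * already in dwords), and frees it on teardown.
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *		volatile u32 *cpu_addr = &adev->wb.wb[wb];
 *
 *		... let the GPU write status to gpu_addr, poll *cpu_addr ...
 *
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */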
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 * @adev: amdgpu_device pointer
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	if (amdgpu_sriov_vf(adev))
	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
	/* Check if the root BUS has 64-bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;
	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
	/* Trying to resize is pointless without a root hub window above 4GB */
	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);
	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);
	pci_release_resource(adev->pdev, 0);
	r = pci_resize_resource(adev->pdev, 0, rbar_size);
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);
	pci_assign_unassigned_bus_resources(adev->pdev->bus);
	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
 * GPU helper functions.
 * amdgpu_device_need_post - check if the hw needs post or not
 * @adev: amdgpu_device pointer
 * Check if the asic has been initialized (all asics) at driver startup,
 * or whether post is needed because a hw reset was performed.
 * Returns true if post is needed or false if not.
bool amdgpu_device_need_post(struct amdgpu_device *adev)
	if (amdgpu_sriov_vf(adev))
	if (amdgpu_passthrough(adev)) {
		/* for FIJI: in the whole-GPU pass-through virtualization case, after a VM
		 * reboot some old SMC firmware still needs the driver to do vPost, otherwise
		 * the GPU hangs. SMC firmware versions above 22.15 don't have this flaw, so
		 * we force vPost to be executed for SMC versions below 22.15.
		if (adev->asic_type == CHIP_FIJI) {
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
	/* Don't post if we need to reset the whole hive on init */
	if (adev->gmc.xgmi.pending_reset)
	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);
	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);
	if ((reg != 0) && (reg != 0xffffffff))
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 * @adev: amdgpu_device pointer
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 * Returns true if it should be used or false if not.
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
	switch (amdgpu_aspm) {
	return pcie_aspm_enabled(adev->pdev);
/* if we get transitioned to only one device, take VGA back */
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
	amdgpu_asic_set_vga_state(adev, state);
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 * amdgpu_device_check_block_size - validate the vm block size
 * @adev: amdgpu_device pointer
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory; a page is 4KB, so we have 12 bits of offset, a minimum of
 * 9 bits in the page table, and the remaining bits in the page directory.
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
	/* defines the number of bits in the page table versus the page directory;
	 * a page is 4KB, so we have 12 bits of offset, a minimum of 9 bits in the
	 * page table, and the remaining bits in the page directory */
	if (amdgpu_vm_block_size == -1)
	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
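/*
 * Worked example (assuming a 48-bit VA space for illustration): with
 * amdgpu_vm_block_size = 9, the 4KB page provides 12 offset bits, the page
 * table covers 9 bits, and the page directory levels cover the remaining
 * 27 bits.
 */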
 * amdgpu_device_check_vm_size - validate the vm size
 * @adev: amdgpu_device pointer
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
		amdgpu_vm_size = -1;
static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;
	if (amdgpu_smu_memory_pool_size == 0)
		DRM_WARN("Not 64-bit OS, feature not supported\n");
	total_memory = (uint64_t)si.totalram * si.mem_unit;
	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
		DRM_WARN("Smu memory pool size not supported\n");
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
	DRM_WARN("Not enough system memory\n");
	adev->pm.smu_prv_buffer_size = 0;
static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
	if (!(adev->flags & AMD_IS_APU) ||
	    adev->asic_type < CHIP_RAVEN)
	switch (adev->asic_type) {
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
	case CHIP_YELLOW_CARP:
	case CHIP_CYAN_SKILLFISH:
		if ((adev->pdev->device == 0x13FE) ||
		    (adev->pdev->device == 0x143F))
			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
 * amdgpu_device_check_arguments - validate module params
 * @adev: amdgpu_device pointer
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater than or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
		amdgpu_gart_size = -1;
	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater than or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
		amdgpu_gtt_size = -1;
	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	if (amdgpu_sched_hw_submission < 2) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = 2;
	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
		amdgpu_reset_method = -1;
	amdgpu_device_check_smu_prv_buffer_size(adev);
	amdgpu_device_check_vm_size(adev);
	amdgpu_device_check_block_size(adev);
	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
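/*
 * Example: booting with amdgpu.sched_jobs=6 trips the power-of-two check
 * above, so the value is rounded up to 8 before it is used.
 */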
 * amdgpu_switcheroo_set_state - set switcheroo state
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
	struct drm_device *dev = pci_get_drvdata(pdev);
	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		r = pci_enable_device(pdev);
			DRM_WARN("pci_enable_device failed (%d)\n", r);
		amdgpu_device_resume(dev, true);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 * @pdev: pci dev pointer
 * Callback for the switcheroo driver. Checks if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
	struct drm_device *dev = pci_get_drvdata(pdev);
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	return atomic_read(&dev->open_count) == 0;
static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.can_switch = amdgpu_switcheroo_can_switch,
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
	struct amdgpu_device *adev = dev;
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
		if (adev->ip_blocks[i].version->type != block_type)
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
 * amdgpu_device_ip_set_powergating_state - set the PG state
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
	struct amdgpu_device *adev = dev;
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
		if (adev->ip_blocks[i].version->type != block_type)
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
 * amdgpu_device_ip_wait_for_idle - wait for idle
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];
 * amdgpu_device_ip_block_version_cmp
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 * Returns 0 if the IP block version is equal to or greater than the requested
 * version, and 1 if it is smaller or if the ip_block doesn't exist.
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
	if (ip_block && ((ip_block->version->major > major) ||
			 ((ip_block->version->major == major) &&
			  (ip_block->version->minor >= minor))))
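/*
 * Example (illustrative): gating a code path on a minimum IP block version;
 * the version numbers and the enable_workaround() helper are placeholders.
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       8, 1) == 0)
 *		enable_workaround(adev);
 */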
 * amdgpu_device_ip_block_add
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
	if (!ip_block_version)
	switch (ip_block_version->type) {
	case AMD_IP_BLOCK_TYPE_VCN:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
	case AMD_IP_BLOCK_TYPE_JPEG:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);
	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
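/*
 * Example (hedged): ASIC setup code registers its IP blocks with calls of
 * this shape, e.g. from a set_ip_blocks() routine:
 *
 *	r = amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
 *	if (r)
 *		return r;
 */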
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 * @adev: amdgpu_device pointer
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
	adev->enable_virtual_display = false;
	if (amdgpu_virtual_display) {
		const char *pci_address_name = pci_name(adev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				adev->enable_virtual_display = true;
				res = kstrtol(pciaddname_tmp, 10,
					adev->mode_info.num_crtc = num_crtc;
					adev->mode_info.num_crtc = 1;
		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);
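/*
 * Example: amdgpu.virtual_display=0000:26:00.0,2 enables two virtual CRTCs
 * on that PCI device only, while amdgpu.virtual_display=all,1 enables one
 * virtual CRTC on every device bound to the driver.
 */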
void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
		adev->mode_info.num_crtc = 1;
		adev->enable_virtual_display = true;
		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
			 adev->enable_virtual_display, adev->mode_info.num_crtc);
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 * @adev: amdgpu_device pointer
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
	const char *chip_name;
	const struct gpu_info_firmware_header_v1_0 *hdr;
	adev->firmware.gpu_info_fw = NULL;
	if (adev->mman.discovery_bin) {
		 * FIXME: The bounding box is still needed by Navi12, so
		 * temporarily read it from gpu_info firmware. Should be dropped
		 * when DAL no longer needs it.
		if (adev->asic_type != CHIP_NAVI12)
	switch (adev->asic_type) {
		chip_name = "vega10";
		chip_name = "vega12";
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			chip_name = "raven2";
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			chip_name = "picasso";
			chip_name = "raven";
		chip_name = "arcturus";
		chip_name = "navi12";
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
		"Failed to get gpu_info firmware \"%s\"\n",
	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
	switch (hdr->version_major) {
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		 * Should be dropped when DAL no longer needs it.
		if (adev->asic_type == CHIP_NAVI12)
			goto parse_soc_bounding_box;
		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		if (hdr->version_minor >= 1) {
			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->gfx.config.num_sc_per_sh =
				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
			adev->gfx.config.num_packer_per_sc =
				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
parse_soc_bounding_box:
		 * The soc bounding box info is not integrated in the discovery table,
		 * so we always need to parse it from the gpu info firmware if needed.
		if (hdr->version_minor == 2) {
			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 * @adev: amdgpu_device pointer
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered, and each IP's early_init callback is run. This
 * is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
	struct drm_device *dev = adev_to_drm(adev);
	struct pci_dev *parent;
	amdgpu_device_enable_virtual_display(adev);
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
#ifdef CONFIG_DRM_AMDGPU_CIK
		if (adev->flags & AMD_IS_APU)
			adev->family = AMDGPU_FAMILY_KV;
			adev->family = AMDGPU_FAMILY_CI;
		r = cik_set_ip_blocks(adev);
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		if (adev->flags & AMD_IS_APU)
			adev->family = AMDGPU_FAMILY_CZ;
			adev->family = AMDGPU_FAMILY_VI;
		r = vi_set_ip_blocks(adev);
		r = amdgpu_discovery_set_ip_blocks(adev);
	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((adev->flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
		adev->flags |= AMD_IS_PX;
	if (!(adev->flags & AMD_IS_APU)) {
		parent = pci_upstream_bridge(adev->pdev);
		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
	amdgpu_amdkfd_device_probe(adev);
	adev->pm.pp_feature = amdgpu_pp_feature_mask;
	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		if (adev->ip_blocks[i].version->funcs->early_init) {
			r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				adev->ip_blocks[i].status.valid = false;
				DRM_ERROR("early_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			adev->ip_blocks[i].status.valid = true;
			adev->ip_blocks[i].status.valid = true;
		/* get the vbios after the asic_funcs are set up */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
			r = amdgpu_device_parse_gpu_info_fw(adev);
			if (!amdgpu_get_bios(adev))
			r = amdgpu_atombios_init(adev);
				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
			/* get pf2vf msg info at its earliest time */
			if (amdgpu_sriov_vf(adev))
				amdgpu_virt_init_data_exchange(adev);
	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;
static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
		if (adev->ip_blocks[i].status.hw)
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			adev->ip_blocks[i].status.hw = true;
static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
		if (adev->ip_blocks[i].status.hw)
		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		adev->ip_blocks[i].status.hw = true;
2280 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2284 uint32_t smu_version;
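/* From VEGA10 onward, firmware loading goes through the PSP block, so
 * bring PSP up first: hw_init on a cold start, resume when coming back
 * from reset or suspend.
 */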
2286 if (adev->asic_type >= CHIP_VEGA10) {
2287 for (i = 0; i < adev->num_ip_blocks; i++) {
2288 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2291 if (!adev->ip_blocks[i].status.sw)
2294 /* no need to do the fw loading again if already done */
2295 if (adev->ip_blocks[i].status.hw)
2298 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2299 r = adev->ip_blocks[i].version->funcs->resume(adev);
2301 DRM_ERROR("resume of IP block <%s> failed %d\n",
2302 adev->ip_blocks[i].version->funcs->name, r);
2306 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2308 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2309 adev->ip_blocks[i].version->funcs->name, r);
2314 adev->ip_blocks[i].status.hw = true;
2319 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2320 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2325 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2330 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2331 struct amdgpu_ring *ring = adev->rings[i];
2333 /* No need to set up the GPU scheduler for rings that don't need it */
2334 if (!ring || ring->no_scheduler)
2337 switch (ring->funcs->type) {
2338 case AMDGPU_RING_TYPE_GFX:
2339 timeout = adev->gfx_timeout;
2341 case AMDGPU_RING_TYPE_COMPUTE:
2342 timeout = adev->compute_timeout;
2344 case AMDGPU_RING_TYPE_SDMA:
2345 timeout = adev->sdma_timeout;
2348 timeout = adev->video_timeout;
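/* Each ring gets its own scheduler instance; they all share the reset
 * domain's workqueue so recovery work is serialized across rings that
 * belong to the same reset domain.
 */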
2352 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2353 ring->num_hw_submission, amdgpu_job_hang_limit,
2354 timeout, adev->reset_domain->wq,
2355 ring->sched_score, ring->name,
2358 DRM_ERROR("Failed to create scheduler on ring %s.\n",
2369 * amdgpu_device_ip_init - run init for hardware IPs
2371 * @adev: amdgpu_device pointer
2373 * Main initialization pass for hardware IPs. The list of all the hardware
2374 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2375 * are run. sw_init initializes the software state associated with each IP
2376 * and hw_init initializes the hardware associated with each IP.
2377 * Returns 0 on success, negative error code on failure.
2379 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2383 r = amdgpu_ras_init(adev);
2387 for (i = 0; i < adev->num_ip_blocks; i++) {
2388 if (!adev->ip_blocks[i].status.valid)
2390 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2392 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2393 adev->ip_blocks[i].version->funcs->name, r);
2396 adev->ip_blocks[i].status.sw = true;
2398 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2399 /* need to do common hw init early so everything is set up for gmc */
2400 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2402 DRM_ERROR("hw_init %d failed %d\n", i, r);
2405 adev->ip_blocks[i].status.hw = true;
2406 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2407 /* need to do gmc hw init early so we can allocate gpu mem */
2408 /* Try to reserve bad pages early */
2409 if (amdgpu_sriov_vf(adev))
2410 amdgpu_virt_exchange_data(adev);
2412 r = amdgpu_device_mem_scratch_init(adev);
2414 DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
2417 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2419 DRM_ERROR("hw_init %d failed %d\n", i, r);
2422 r = amdgpu_device_wb_init(adev);
2424 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2427 adev->ip_blocks[i].status.hw = true;
2429 /* right after GMC hw init, we create CSA */
2431 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2432 AMDGPU_GEM_DOMAIN_VRAM |
2433 AMDGPU_GEM_DOMAIN_GTT,
2436 DRM_ERROR("allocate CSA failed %d\n", r);
2443 if (amdgpu_sriov_vf(adev))
2444 amdgpu_virt_init_data_exchange(adev);
2446 r = amdgpu_ib_pool_init(adev);
2448 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2449 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2453 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2457 r = amdgpu_device_ip_hw_init_phase1(adev);
2461 r = amdgpu_device_fw_loading(adev);
2465 r = amdgpu_device_ip_hw_init_phase2(adev);
2470 * retired pages will be loaded from eeprom and reserved here;
2471 * this should be called after amdgpu_device_ip_hw_init_phase2, since
2472 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2473 * functional for I2C communication, which is only true at this point.
2475 * amdgpu_ras_recovery_init may fail, but the upper layers only care
2476 * about failures caused by a bad gpu state, and stop the amdgpu init
2477 * process accordingly. For other failure cases, it still releases all
2478 * the resources and prints an error message rather than returning a
2479 * negative value to the upper level.
2481 * Note: theoretically, this should be called before all vram allocations
2482 * to protect retired pages from being abused.
2484 r = amdgpu_ras_recovery_init(adev);
2489 * In the case of XGMI, grab an extra reference to the reset domain for this device
2491 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2492 if (amdgpu_xgmi_add_device(adev) == 0) {
2493 if (!amdgpu_sriov_vf(adev)) {
2494 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2496 if (WARN_ON(!hive)) {
2501 if (!hive->reset_domain ||
2502 !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2504 amdgpu_put_xgmi_hive(hive);
2508 /* Drop the early temporary reset domain we created for device */
2509 amdgpu_reset_put_reset_domain(adev->reset_domain);
2510 adev->reset_domain = hive->reset_domain;
2511 amdgpu_put_xgmi_hive(hive);
2516 r = amdgpu_device_init_schedulers(adev);
2520 /* Don't init kfd if the whole hive needs to be reset during init */
2521 if (!adev->gmc.xgmi.pending_reset)
2522 amdgpu_amdkfd_device_init(adev);
2524 amdgpu_fru_get_product_info(adev);
2527 if (amdgpu_sriov_vf(adev))
2528 amdgpu_virt_release_full_gpu(adev, true);
2534 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2536 * @adev: amdgpu_device pointer
2538 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2539 * this function before a GPU reset. If the value is retained after a
2540 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2542 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
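/* Snapshot the first AMDGPU_RESET_MAGIC_NUM bytes of the GART table so
 * amdgpu_device_check_vram_lost() can compare against them after reset.
 */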
2544 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2548 * amdgpu_device_check_vram_lost - check if vram is valid
2550 * @adev: amdgpu_device pointer
2552 * Checks the reset magic value written to the gart pointer in VRAM.
2553 * The driver calls this after a GPU reset to see if the contents of
2554 * VRAM are lost or not.
2555 * Returns true if vram is lost, false if not.
2557 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2559 if (memcmp(adev->gart.ptr, adev->reset_magic,
2560 AMDGPU_RESET_MAGIC_NUM))
2563 if (!amdgpu_in_reset(adev))
2567 * For all ASICs with baco/mode1 reset, the VRAM is
2568 * always assumed to be lost.
2570 switch (amdgpu_asic_reset_method(adev)) {
2571 case AMD_RESET_METHOD_BACO:
2572 case AMD_RESET_METHOD_MODE1:
2580 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2582 * @adev: amdgpu_device pointer
2583 * @state: clockgating state (gate or ungate)
2585 * The list of all the hardware IPs that make up the asic is walked and the
2586 * set_clockgating_state callbacks are run.
2587 * For late init, this pass enables clockgating for hardware IPs;
2588 * for fini or suspend, it disables clockgating for hardware IPs.
2589 * Returns 0 on success, negative error code on failure.
2592 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2593 enum amd_clockgating_state state)
2597 if (amdgpu_emu_mode == 1)
2600 for (j = 0; j < adev->num_ip_blocks; j++) {
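/* gate in discovery order, ungate in the reverse order */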
2601 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2602 if (!adev->ip_blocks[i].status.late_initialized)
2604 /* skip CG for GFX, SDMA on S0ix */
2605 if (adev->in_s0ix &&
2606 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2607 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2609 /* skip CG for VCE/UVD, it's handled specially */
2610 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2611 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2612 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2613 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2614 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2615 /* enable clockgating to save power */
2616 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2619 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2620 adev->ip_blocks[i].version->funcs->name, r);
2629 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2630 enum amd_powergating_state state)
2634 if (amdgpu_emu_mode == 1)
2637 for (j = 0; j < adev->num_ip_blocks; j++) {
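/* as with clockgating: gate in discovery order, ungate in reverse */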
2638 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2639 if (!adev->ip_blocks[i].status.late_initialized)
2641 /* skip PG for GFX, SDMA on S0ix */
2642 if (adev->in_s0ix &&
2643 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2644 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2646 /* skip PG for VCE/UVD, it's handled specially */
2647 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2648 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2649 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2650 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2651 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2652 /* enable powergating to save power */
2653 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2656 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2657 adev->ip_blocks[i].version->funcs->name, r);
2665 static int amdgpu_device_enable_mgpu_fan_boost(void)
2667 struct amdgpu_gpu_instance *gpu_ins;
2668 struct amdgpu_device *adev;
2671 mutex_lock(&mgpu_info.mutex);
2674 * MGPU fan boost feature should be enabled
2675 * only when there are two or more dGPUs in the system.
2678 if (mgpu_info.num_dgpu < 2)
2681 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2682 gpu_ins = &(mgpu_info.gpu_ins[i]);
2683 adev = gpu_ins->adev;
2684 if (!(adev->flags & AMD_IS_APU) &&
2685 !gpu_ins->mgpu_fan_enabled) {
2686 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2690 gpu_ins->mgpu_fan_enabled = 1;
2695 mutex_unlock(&mgpu_info.mutex);
2701 * amdgpu_device_ip_late_init - run late init for hardware IPs
2703 * @adev: amdgpu_device pointer
2705 * Late initialization pass for hardware IPs. The list of all the hardware
2706 * IPs that make up the asic is walked and the late_init callbacks are run.
2707 * late_init covers any special initialization that an IP requires
2708 * after all of the IPs have been initialized, or something that needs to happen
2709 * late in the init process.
2710 * Returns 0 on success, negative error code on failure.
2712 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2714 struct amdgpu_gpu_instance *gpu_instance;
2717 for (i = 0; i < adev->num_ip_blocks; i++) {
2718 if (!adev->ip_blocks[i].status.hw)
2720 if (adev->ip_blocks[i].version->funcs->late_init) {
2721 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2723 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2724 adev->ip_blocks[i].version->funcs->name, r);
2728 adev->ip_blocks[i].status.late_initialized = true;
2731 r = amdgpu_ras_late_init(adev);
2733 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2737 amdgpu_ras_set_error_query_ready(adev, true);
2739 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2740 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2742 amdgpu_device_fill_reset_magic(adev);
2744 r = amdgpu_device_enable_mgpu_fan_boost();
2746 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2748 /* For passthrough configuration on arcturus and aldebaran, enable special SBR handling */
2749 if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2750 adev->asic_type == CHIP_ALDEBARAN))
2751 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2753 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2754 mutex_lock(&mgpu_info.mutex);
2757 * Reset device p-state to low as this was booted with high.
2759 * This should be performed only after all devices from the same
2760 * hive get initialized.
2762 * However, the number of devices in a hive is not known in advance;
2763 * it is counted one by one during device initialization.
2765 * So, we wait until all XGMI interlinked devices are initialized.
2766 * This may bring some delays as those devices may come from
2767 * different hives. But that should be OK.
2769 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2770 for (i = 0; i < mgpu_info.num_gpu; i++) {
2771 gpu_instance = &(mgpu_info.gpu_ins[i]);
2772 if (gpu_instance->adev->flags & AMD_IS_APU)
2775 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2776 AMDGPU_XGMI_PSTATE_MIN);
2778 DRM_ERROR("pstate setting failed (%d).\n", r);
2784 mutex_unlock(&mgpu_info.mutex);
2791 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2793 * @adev: amdgpu_device pointer
2795 * For ASICs that need to disable the SMC first
2797 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
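/* Only parts with GC 9.0.0 or older need the SMC brought down ahead of
 * the other IP blocks; anything newer bails out here.
 */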
2801 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2804 for (i = 0; i < adev->num_ip_blocks; i++) {
2805 if (!adev->ip_blocks[i].status.hw)
2807 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2808 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2809 /* XXX handle errors */
2811 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2812 adev->ip_blocks[i].version->funcs->name, r);
2814 adev->ip_blocks[i].status.hw = false;
2820 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2824 for (i = 0; i < adev->num_ip_blocks; i++) {
2825 if (!adev->ip_blocks[i].version->funcs->early_fini)
2828 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2830 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2831 adev->ip_blocks[i].version->funcs->name, r);
2835 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2836 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2838 amdgpu_amdkfd_suspend(adev, false);
2840 /* Workaround for ASICs that need to disable the SMC first */
2841 amdgpu_device_smu_fini_early(adev);
2843 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2844 if (!adev->ip_blocks[i].status.hw)
2847 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2848 /* XXX handle errors */
2850 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2851 adev->ip_blocks[i].version->funcs->name, r);
2854 adev->ip_blocks[i].status.hw = false;
2857 if (amdgpu_sriov_vf(adev)) {
2858 if (amdgpu_virt_release_full_gpu(adev, false))
2859 DRM_ERROR("failed to release exclusive mode on fini\n");
2866 * amdgpu_device_ip_fini - run fini for hardware IPs
2868 * @adev: amdgpu_device pointer
2870 * Main teardown pass for hardware IPs. The list of all the hardware
2871 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2872 * are run. hw_fini tears down the hardware associated with each IP
2873 * and sw_fini tears down any software state associated with each IP.
2874 * Returns 0 on success, negative error code on failure.
2876 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2880 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2881 amdgpu_virt_release_ras_err_handler_data(adev);
2883 if (adev->gmc.xgmi.num_physical_nodes > 1)
2884 amdgpu_xgmi_remove_device(adev);
2886 amdgpu_amdkfd_device_fini_sw(adev);
2888 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2889 if (!adev->ip_blocks[i].status.sw)
2892 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2893 amdgpu_ucode_free_bo(adev);
2894 amdgpu_free_static_csa(&adev->virt.csa_obj);
2895 amdgpu_device_wb_fini(adev);
2896 amdgpu_device_mem_scratch_fini(adev);
2897 amdgpu_ib_pool_fini(adev);
2900 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2901 /* XXX handle errors */
2903 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2904 adev->ip_blocks[i].version->funcs->name, r);
2906 adev->ip_blocks[i].status.sw = false;
2907 adev->ip_blocks[i].status.valid = false;
2910 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2911 if (!adev->ip_blocks[i].status.late_initialized)
2913 if (adev->ip_blocks[i].version->funcs->late_fini)
2914 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2915 adev->ip_blocks[i].status.late_initialized = false;
2918 amdgpu_ras_fini(adev);
2924 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2926 * @work: work_struct.
2928 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2930 struct amdgpu_device *adev =
2931 container_of(work, struct amdgpu_device, delayed_init_work.work);
2934 r = amdgpu_ib_ring_tests(adev);
2936 DRM_ERROR("ib ring test failed (%d).\n", r);
2939 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2941 struct amdgpu_device *adev =
2942 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
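/* This delayed worker only runs once every GFXOFF disable request has
 * been dropped; both WARNs below guard that invariant.
 */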
2944 WARN_ON_ONCE(adev->gfx.gfx_off_state);
2945 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2947 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2948 adev->gfx.gfx_off_state = true;
2952 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2954 * @adev: amdgpu_device pointer
2956 * Main suspend function for hardware IPs. The list of all the hardware
2957 * IPs that make up the asic is walked, clockgating is disabled and the
2958 * suspend callbacks are run. suspend puts the hardware and software state
2959 * in each IP into a state suitable for suspend.
2960 * Returns 0 on success, negative error code on failure.
2962 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2966 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2967 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2970 * Per the PMFW team's suggestion, the driver needs to handle gfxoff
2971 * and df cstate feature disablement for the gpu reset (e.g. Mode1Reset)
2972 * scenario. Add the missing df cstate disablement here.
2974 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2975 dev_warn(adev->dev, "Failed to disallow df cstate");
2977 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2978 if (!adev->ip_blocks[i].status.valid)
2981 /* displays are handled separately */
2982 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2986 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2987 /* XXX handle errors */
2989 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2990 adev->ip_blocks[i].version->funcs->name, r);
2994 adev->ip_blocks[i].status.hw = false;
3001 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3003 * @adev: amdgpu_device pointer
3005 * Main suspend function for hardware IPs. The list of all the hardware
3006 * IPs that make up the asic is walked, clockgating is disabled and the
3007 * suspend callbacks are run. suspend puts the hardware and software state
3008 * in each IP into a state suitable for suspend.
3009 * Returns 0 on success, negative error code on failure.
3011 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3016 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3018 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3019 if (!adev->ip_blocks[i].status.valid)
3021 /* displays are handled in phase1 */
3022 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3024 /* PSP lost connection when err_event_athub occurs */
3025 if (amdgpu_ras_intr_triggered() &&
3026 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3027 adev->ip_blocks[i].status.hw = false;
3031 /* skip unnecessary suspend if we have not initialized them yet */
3032 if (adev->gmc.xgmi.pending_reset &&
3033 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3034 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3035 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3036 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3037 adev->ip_blocks[i].status.hw = false;
3041 /* skip suspend of gfx/mes and psp for S0ix
3042 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3043 * like at runtime. PSP is also part of the always on hardware
3044 * so no need to suspend it.
3046 if (adev->in_s0ix &&
3047 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3048 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3049 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3052 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3053 if (adev->in_s0ix &&
3054 (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
3055 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
3058 /* swPSP provides the IMU and RLC FW binaries to the TOS during cold boot.
3059 * These live in the TMR and are expected to be reused by PSP-TOS to reload
3060 * from that location; RLC autoload likewise gets loaded from there based
3061 * on the PMFW -> PSP message during the re-init sequence.
3062 * Therefore, psp suspend & resume should be skipped to avoid destroying
3063 * the TMR and reloading FWs again for IMU-enabled APU ASICs.
3065 if (amdgpu_in_reset(adev) &&
3066 (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3067 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3071 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3072 /* XXX handle errors */
3074 DRM_ERROR("suspend of IP block <%s> failed %d\n",
3075 adev->ip_blocks[i].version->funcs->name, r);
3077 adev->ip_blocks[i].status.hw = false;
3078 /* handle putting the SMC in the appropriate state */
3079 if (!amdgpu_sriov_vf(adev)) {
3080 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3081 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3083 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3084 adev->mp1_state, r);
3095 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3097 * @adev: amdgpu_device pointer
3099 * Main suspend function for hardware IPs. The list of all the hardware
3100 * IPs that make up the asic is walked, clockgating is disabled and the
3101 * suspend callbacks are run. suspend puts the hardware and software state
3102 * in each IP into a state suitable for suspend.
3103 * Returns 0 on success, negative error code on failure.
3105 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3109 if (amdgpu_sriov_vf(adev)) {
3110 amdgpu_virt_fini_data_exchange(adev);
3111 amdgpu_virt_request_full_gpu(adev, false);
3114 r = amdgpu_device_ip_suspend_phase1(adev);
3117 r = amdgpu_device_ip_suspend_phase2(adev);
3119 if (amdgpu_sriov_vf(adev))
3120 amdgpu_virt_release_full_gpu(adev, false);
3125 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3129 static enum amd_ip_block_type ip_order[] = {
3130 AMD_IP_BLOCK_TYPE_COMMON,
3131 AMD_IP_BLOCK_TYPE_GMC,
3132 AMD_IP_BLOCK_TYPE_PSP,
3133 AMD_IP_BLOCK_TYPE_IH,
3136 for (i = 0; i < adev->num_ip_blocks; i++) {
3138 struct amdgpu_ip_block *block;
3140 block = &adev->ip_blocks[i];
3141 block->status.hw = false;
3143 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3145 if (block->version->type != ip_order[j] ||
3146 !block->status.valid)
3149 r = block->version->funcs->hw_init(adev);
3150 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3153 block->status.hw = true;
3160 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3164 static enum amd_ip_block_type ip_order[] = {
3165 AMD_IP_BLOCK_TYPE_SMC,
3166 AMD_IP_BLOCK_TYPE_DCE,
3167 AMD_IP_BLOCK_TYPE_GFX,
3168 AMD_IP_BLOCK_TYPE_SDMA,
3169 AMD_IP_BLOCK_TYPE_UVD,
3170 AMD_IP_BLOCK_TYPE_VCE,
3171 AMD_IP_BLOCK_TYPE_VCN
3174 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3176 struct amdgpu_ip_block *block;
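/* locate the block for this stage; the SMC is resumed while all other
 * blocks get a full hw_init
 */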
3178 for (j = 0; j < adev->num_ip_blocks; j++) {
3179 block = &adev->ip_blocks[j];
3181 if (block->version->type != ip_order[i] ||
3182 !block->status.valid ||
3186 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3187 r = block->version->funcs->resume(adev);
3189 r = block->version->funcs->hw_init(adev);
3191 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3194 block->status.hw = true;
3202 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3204 * @adev: amdgpu_device pointer
3206 * First resume function for hardware IPs. The list of all the hardware
3207 * IPs that make up the asic is walked and the resume callbacks are run for
3208 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3209 * after a suspend and updates the software state as necessary. This
3210 * function is also used for restoring the GPU after a GPU reset.
3211 * Returns 0 on success, negative error code on failure.
3213 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3217 for (i = 0; i < adev->num_ip_blocks; i++) {
3218 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3220 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3221 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3222 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3223 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3225 r = adev->ip_blocks[i].version->funcs->resume(adev);
3227 DRM_ERROR("resume of IP block <%s> failed %d\n",
3228 adev->ip_blocks[i].version->funcs->name, r);
3231 adev->ip_blocks[i].status.hw = true;
3239 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3241 * @adev: amdgpu_device pointer
3243 * Second resume function for hardware IPs. The list of all the hardware
3244 * IPs that make up the asic is walked and the resume callbacks are run for
3245 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
3246 * functional state after a suspend and updates the software state as
3247 * necessary. This function is also used for restoring the GPU after a GPU
3248 * reset.
3249 * Returns 0 on success, negative error code on failure.
3251 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3255 for (i = 0; i < adev->num_ip_blocks; i++) {
3256 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3258 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3259 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3260 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3261 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3263 r = adev->ip_blocks[i].version->funcs->resume(adev);
3265 DRM_ERROR("resume of IP block <%s> failed %d\n",
3266 adev->ip_blocks[i].version->funcs->name, r);
3269 adev->ip_blocks[i].status.hw = true;
3276 * amdgpu_device_ip_resume - run resume for hardware IPs
3278 * @adev: amdgpu_device pointer
3280 * Main resume function for hardware IPs. The hardware IPs
3281 * are split into two resume functions because they are
3282 * also used in recovering from a GPU reset, and some additional
3283 * steps need to be taken between them. In this case (S3/S4) they are
3284 * run sequentially.
3285 * Returns 0 on success, negative error code on failure.
3287 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3291 r = amdgpu_amdkfd_resume_iommu(adev);
3295 r = amdgpu_device_ip_resume_phase1(adev);
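/* firmware is (re)loaded between the two phases: blocks resumed in
 * phase2 (GFX, SDMA, ...) depend on their microcode being in place
 */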
3299 r = amdgpu_device_fw_loading(adev);
3303 r = amdgpu_device_ip_resume_phase2(adev);
3309 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3311 * @adev: amdgpu_device pointer
3313 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3315 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3317 if (amdgpu_sriov_vf(adev)) {
3318 if (adev->is_atom_fw) {
3319 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3320 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3322 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3323 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3326 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3327 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3332 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3334 * @asic_type: AMD asic type
3336 * Check if there is DC (new modesetting infrastructure) support for an asic.
3337 * returns true if DC has support, false if not.
3339 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3341 switch (asic_type) {
3342 #ifdef CONFIG_DRM_AMDGPU_SI
3346 /* chips with no display hardware */
3348 #if defined(CONFIG_DRM_AMD_DC)
3354 * We have systems in the wild with these ASICs that require
3355 * LVDS and VGA support which is not supported with DC.
3357 * Fall back to the non-DC driver here by default so as not to
3358 * cause regressions.
3360 #if defined(CONFIG_DRM_AMD_DC_SI)
3361 return amdgpu_dc > 0;
3370 * We have systems in the wild with these ASICs that require
3371 * VGA support which is not supported with DC.
3373 * Fall back to the non-DC driver here by default so as not to
3374 * cause regressions.
3376 return amdgpu_dc > 0;
3378 return amdgpu_dc != 0;
3382 DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3383 "but isn't supported by ASIC, ignoring\n");
3390 * amdgpu_device_has_dc_support - check if dc is supported
3392 * @adev: amdgpu_device pointer
3394 * Returns true for supported, false for not supported
3396 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3398 if (adev->enable_virtual_display ||
3399 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3402 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3405 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3407 struct amdgpu_device *adev =
3408 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3409 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3411 /* It's a bug to not have a hive within this function */
3416 * Use task barrier to synchronize all xgmi reset works across the
3417 * hive. task_barrier_enter and task_barrier_exit will block
3418 * until all the threads running the xgmi reset works reach
3419 * those points. task_barrier_full will do both blocks.
3421 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3423 task_barrier_enter(&hive->tb);
3424 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3426 if (adev->asic_reset_res)
3429 task_barrier_exit(&hive->tb);
3430 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3432 if (adev->asic_reset_res)
3435 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3436 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3437 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3440 task_barrier_full(&hive->tb);
3441 adev->asic_reset_res = amdgpu_asic_reset(adev);
3445 if (adev->asic_reset_res)
3446 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3447 adev->asic_reset_res, adev_to_drm(adev)->unique);
3448 amdgpu_put_xgmi_hive(hive);
3451 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3453 char *input = amdgpu_lockup_timeout;
3454 char *timeout_setting = NULL;
3460 * By default the timeout for non-compute jobs is 10000 ms
3461 * and 60000 ms for compute jobs.
3462 * In SR-IOV or passthrough mode, the timeout for compute
3463 * jobs is 60000 ms by default.
3465 adev->gfx_timeout = msecs_to_jiffies(10000);
3466 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3467 if (amdgpu_sriov_vf(adev))
3468 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3469 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3471 adev->compute_timeout = msecs_to_jiffies(60000);
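/* amdgpu.lockup_timeout takes up to four comma-separated values, e.g.
 * "10000,60000,10000,10000" for GFX, compute, SDMA and video (in ms,
 * in that order); a single value covers all non-compute queues.
 */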
3473 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3474 while ((timeout_setting = strsep(&input, ",")) &&
3475 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3476 ret = kstrtol(timeout_setting, 0, &timeout);
3483 } else if (timeout < 0) {
3484 timeout = MAX_SCHEDULE_TIMEOUT;
3485 dev_warn(adev->dev, "lockup timeout disabled");
3486 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3488 timeout = msecs_to_jiffies(timeout);
3493 adev->gfx_timeout = timeout;
3496 adev->compute_timeout = timeout;
3499 adev->sdma_timeout = timeout;
3502 adev->video_timeout = timeout;
3509 * There is only one value specified and
3510 * it should apply to all non-compute jobs.
3513 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3514 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3515 adev->compute_timeout = adev->gfx_timeout;
3523 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3525 * @adev: amdgpu_device pointer
3527 * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3529 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3531 struct iommu_domain *domain;
3533 domain = iommu_get_domain_for_dev(adev->dev);
3534 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3535 adev->ram_is_direct_mapped = true;
3538 static const struct attribute *amdgpu_dev_attributes[] = {
3539 &dev_attr_product_name.attr,
3540 &dev_attr_product_number.attr,
3541 &dev_attr_serial_number.attr,
3542 &dev_attr_pcie_replay_count.attr,
3547 * amdgpu_device_init - initialize the driver
3549 * @adev: amdgpu_device pointer
3550 * @flags: driver flags
3552 * Initializes the driver info and hw (all asics).
3553 * Returns 0 for success or an error on failure.
3554 * Called at driver startup.
3556 int amdgpu_device_init(struct amdgpu_device *adev,
3559 struct drm_device *ddev = adev_to_drm(adev);
3560 struct pci_dev *pdev = adev->pdev;
3565 adev->shutdown = false;
3566 adev->flags = flags;
3568 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3569 adev->asic_type = amdgpu_force_asic_type;
3571 adev->asic_type = flags & AMD_ASIC_MASK;
3573 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3574 if (amdgpu_emu_mode == 1)
3575 adev->usec_timeout *= 10;
3576 adev->gmc.gart_size = 512 * 1024 * 1024;
3577 adev->accel_working = false;
3578 adev->num_rings = 0;
3579 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3580 adev->mman.buffer_funcs = NULL;
3581 adev->mman.buffer_funcs_ring = NULL;
3582 adev->vm_manager.vm_pte_funcs = NULL;
3583 adev->vm_manager.vm_pte_num_scheds = 0;
3584 adev->gmc.gmc_funcs = NULL;
3585 adev->harvest_ip_mask = 0x0;
3586 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3587 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3589 adev->smc_rreg = &amdgpu_invalid_rreg;
3590 adev->smc_wreg = &amdgpu_invalid_wreg;
3591 adev->pcie_rreg = &amdgpu_invalid_rreg;
3592 adev->pcie_wreg = &amdgpu_invalid_wreg;
3593 adev->pciep_rreg = &amdgpu_invalid_rreg;
3594 adev->pciep_wreg = &amdgpu_invalid_wreg;
3595 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3596 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3597 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3598 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3599 adev->didt_rreg = &amdgpu_invalid_rreg;
3600 adev->didt_wreg = &amdgpu_invalid_wreg;
3601 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3602 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3603 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3604 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3606 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3607 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3608 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3610 /* mutex initialization is all done here so we
3611 * can recall functions without locking issues */
3612 mutex_init(&adev->firmware.mutex);
3613 mutex_init(&adev->pm.mutex);
3614 mutex_init(&adev->gfx.gpu_clock_mutex);
3615 mutex_init(&adev->srbm_mutex);
3616 mutex_init(&adev->gfx.pipe_reserve_mutex);
3617 mutex_init(&adev->gfx.gfx_off_mutex);
3618 mutex_init(&adev->grbm_idx_mutex);
3619 mutex_init(&adev->mn_lock);
3620 mutex_init(&adev->virt.vf_errors.lock);
3621 hash_init(adev->mn_hash);
3622 mutex_init(&adev->psp.mutex);
3623 mutex_init(&adev->notifier_lock);
3624 mutex_init(&adev->pm.stable_pstate_ctx_lock);
3625 mutex_init(&adev->benchmark_mutex);
3627 amdgpu_device_init_apu_flags(adev);
3629 r = amdgpu_device_check_arguments(adev);
3633 spin_lock_init(&adev->mmio_idx_lock);
3634 spin_lock_init(&adev->smc_idx_lock);
3635 spin_lock_init(&adev->pcie_idx_lock);
3636 spin_lock_init(&adev->uvd_ctx_idx_lock);
3637 spin_lock_init(&adev->didt_idx_lock);
3638 spin_lock_init(&adev->gc_cac_idx_lock);
3639 spin_lock_init(&adev->se_cac_idx_lock);
3640 spin_lock_init(&adev->audio_endpt_idx_lock);
3641 spin_lock_init(&adev->mm_stats.lock);
3643 INIT_LIST_HEAD(&adev->shadow_list);
3644 mutex_init(&adev->shadow_list_lock);
3646 INIT_LIST_HEAD(&adev->reset_list);
3648 INIT_LIST_HEAD(&adev->ras_list);
3650 INIT_DELAYED_WORK(&adev->delayed_init_work,
3651 amdgpu_device_delayed_init_work_handler);
3652 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3653 amdgpu_device_delay_enable_gfx_off);
3655 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
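/* start with one GFXOFF disable request outstanding so the GPU cannot
 * enter gfxoff before init completes and the request is released
 */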
3657 adev->gfx.gfx_off_req_count = 1;
3658 adev->gfx.gfx_off_residency = 0;
3659 adev->gfx.gfx_off_entrycount = 0;
3660 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3662 atomic_set(&adev->throttling_logging_enabled, 1);
3664 * If throttling continues, logging will be performed every minute
3665 * to avoid log flooding. "-1" is subtracted since the thermal
3666 * throttling interrupt comes every second. Thus, the total logging
3667 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3668 * for throttling interrupt) = 60 seconds.
3670 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3671 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3673 /* Registers mapping */
3674 /* TODO: block userspace mapping of io register */
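/* BONAIRE (CIK) and newer expose the register aperture in BAR 5; older
 * SI parts use BAR 2.
 */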
3675 if (adev->asic_type >= CHIP_BONAIRE) {
3676 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3677 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3679 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3680 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3683 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3684 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3686 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3687 if (adev->rmmio == NULL) {
3690 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3691 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3693 amdgpu_device_get_pcie_info(adev);
3696 DRM_INFO("MCBP is enabled\n");
3699 * The reset domain needs to be present early, before the XGMI hive is
3700 * discovered (if any) and initialized, to use the reset sem and in_gpu_reset
3701 * flag early on during init and before calling RREG32.
3703 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3704 if (!adev->reset_domain)
3707 /* detect hw virtualization here */
3708 amdgpu_detect_virtualization(adev);
3710 r = amdgpu_device_get_job_timeout_settings(adev);
3712 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3716 /* early init functions */
3717 r = amdgpu_device_ip_early_init(adev);
3721 /* Get rid of things like offb */
3722 r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
3726 /* Enable TMZ based on IP_VERSION */
3727 amdgpu_gmc_tmz_set(adev);
3729 amdgpu_gmc_noretry_set(adev);
3730 /* Need to get xgmi info early to decide the reset behavior */
3731 if (adev->gmc.xgmi.supported) {
3732 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3737 /* enable PCIE atomic ops */
3738 if (amdgpu_sriov_vf(adev))
3739 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3740 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3741 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3743 adev->have_atomics_support =
3744 !pci_enable_atomic_ops_to_root(adev->pdev,
3745 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3746 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3747 if (!adev->have_atomics_support)
3748 dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3750 /* doorbell bar mapping and doorbell index init */
3751 amdgpu_device_doorbell_init(adev);
3753 if (amdgpu_emu_mode == 1) {
3754 /* post the asic in emulation mode */
3755 emu_soc_asic_init(adev);
3756 goto fence_driver_init;
3759 amdgpu_reset_init(adev);
3761 /* detect if we have an SR-IOV vbios */
3762 amdgpu_device_detect_sriov_bios(adev);
3764 /* check if we need to reset the asic
3765 * E.g., driver was not cleanly unloaded previously, etc.
3767 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3768 if (adev->gmc.xgmi.num_physical_nodes) {
3769 dev_info(adev->dev, "Pending hive reset.\n");
3770 adev->gmc.xgmi.pending_reset = true;
3771 /* Only need to init the necessary blocks for SMU to handle the reset */
3772 for (i = 0; i < adev->num_ip_blocks; i++) {
3773 if (!adev->ip_blocks[i].status.valid)
3775 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3776 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3777 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3778 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3779 DRM_DEBUG("IP %s disabled for hw_init.\n",
3780 adev->ip_blocks[i].version->funcs->name);
3781 adev->ip_blocks[i].status.hw = true;
3785 r = amdgpu_asic_reset(adev);
3787 dev_err(adev->dev, "asic reset on init failed\n");
3793 /* Post card if necessary */
3794 if (amdgpu_device_need_post(adev)) {
3796 dev_err(adev->dev, "no vBIOS found\n");
3800 DRM_INFO("GPU posting now...\n");
3801 r = amdgpu_device_asic_init(adev);
3803 dev_err(adev->dev, "gpu post error!\n");
3808 if (adev->is_atom_fw) {
3809 /* Initialize clocks */
3810 r = amdgpu_atomfirmware_get_clock_info(adev);
3812 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3813 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3817 /* Initialize clocks */
3818 r = amdgpu_atombios_get_clock_info(adev);
3820 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3821 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3824 /* init i2c buses */
3825 if (!amdgpu_device_has_dc_support(adev))
3826 amdgpu_atombios_i2c_init(adev);
3831 r = amdgpu_fence_driver_sw_init(adev);
3833 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3834 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3838 /* init the mode config */
3839 drm_mode_config_init(adev_to_drm(adev));
3841 r = amdgpu_device_ip_init(adev);
3843 /* failed in exclusive mode due to timeout */
3844 if (amdgpu_sriov_vf(adev) &&
3845 !amdgpu_sriov_runtime(adev) &&
3846 amdgpu_virt_mmio_blocked(adev) &&
3847 !amdgpu_virt_wait_reset(adev)) {
3848 dev_err(adev->dev, "VF exclusive mode timeout\n");
3849 /* Don't send request since VF is inactive. */
3850 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3851 adev->virt.ops = NULL;
3853 goto release_ras_con;
3855 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3856 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3857 goto release_ras_con;
3860 amdgpu_fence_driver_hw_init(adev);
3863 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3864 adev->gfx.config.max_shader_engines,
3865 adev->gfx.config.max_sh_per_se,
3866 adev->gfx.config.max_cu_per_sh,
3867 adev->gfx.cu_info.number);
3869 adev->accel_working = true;
3871 amdgpu_vm_check_compute_bug(adev);
3873 /* Initialize the buffer migration limit. */
3874 if (amdgpu_moverate >= 0)
3875 max_MBps = amdgpu_moverate;
3877 max_MBps = 8; /* Allow 8 MB/s. */
3878 /* Get a log2 for easy divisions. */
3879 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3881 r = amdgpu_pm_sysfs_init(adev);
3883 DRM_ERROR("registering pm sysfs failed (%d).\n", r);
3885 r = amdgpu_ucode_sysfs_init(adev);
3887 adev->ucode_sysfs_en = false;
3888 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3890 adev->ucode_sysfs_en = true;
3892 r = amdgpu_psp_sysfs_init(adev);
3894 adev->psp_sysfs_en = false;
3895 if (!amdgpu_sriov_vf(adev))
3896 DRM_ERROR("Creating psp sysfs failed\n");
3898 adev->psp_sysfs_en = true;
3901 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3902 * Otherwise the mgpu fan boost feature will be skipped because the
3903 * gpu instance count comes up short.
3905 amdgpu_register_gpu_instance(adev);
3907 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3908 * explicit gating rather than handling it automatically.
3910 if (!adev->gmc.xgmi.pending_reset) {
3911 r = amdgpu_device_ip_late_init(adev);
3913 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3914 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3915 goto release_ras_con;
3918 amdgpu_ras_resume(adev);
3919 queue_delayed_work(system_wq, &adev->delayed_init_work,
3920 msecs_to_jiffies(AMDGPU_RESUME_MS));
3923 if (amdgpu_sriov_vf(adev))
3924 flush_delayed_work(&adev->delayed_init_work);
3926 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3928 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3930 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3931 r = amdgpu_pmu_init(adev);
3933 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3935 /* Keep the stored pci config space at hand for restore after a sudden PCI error */
3936 if (amdgpu_device_cache_pci_state(adev->pdev))
3937 pci_restore_state(pdev);
3939 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3940 /* this will fail for cards that aren't VGA class devices, just
3941 * ignore it */
3942 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3943 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3945 px = amdgpu_device_supports_px(ddev);
3947 if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
3948 apple_gmux_detect(NULL, NULL)))
3949 vga_switcheroo_register_client(adev->pdev,
3950 &amdgpu_switcheroo_ops, px);
3953 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3955 if (adev->gmc.xgmi.pending_reset)
3956 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3957 msecs_to_jiffies(AMDGPU_RESUME_MS));
3959 amdgpu_device_check_iommu_direct_map(adev);
3964 amdgpu_release_ras_context(adev);
3967 amdgpu_vf_error_trans_all(adev);
3972 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3975 /* Clear all CPU mappings pointing to this device */
3976 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3978 /* Unmap all mapped bars - Doorbell, registers and VRAM */
3979 amdgpu_device_doorbell_fini(adev);
3981 iounmap(adev->rmmio);
3983 if (adev->mman.aper_base_kaddr)
3984 iounmap(adev->mman.aper_base_kaddr);
3985 adev->mman.aper_base_kaddr = NULL;
3987 /* Memory manager related */
3988 if (!adev->gmc.xgmi.connected_to_cpu) {
3989 arch_phys_wc_del(adev->gmc.vram_mtrr);
3990 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3995 * amdgpu_device_fini_hw - tear down the driver
3997 * @adev: amdgpu_device pointer
3999 * Tear down the driver info (all asics).
4000 * Called at driver shutdown.
4002 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4004 dev_info(adev->dev, "amdgpu: finishing device.\n");
4005 flush_delayed_work(&adev->delayed_init_work);
4006 adev->shutdown = true;
4008 /* make sure the IB test finishes before entering exclusive mode
4009 * to avoid preemption during the IB test
4011 if (amdgpu_sriov_vf(adev)) {
4012 amdgpu_virt_request_full_gpu(adev, false);
4013 amdgpu_virt_fini_data_exchange(adev);
4016 /* disable all interrupts */
4017 amdgpu_irq_disable_all(adev);
4018 if (adev->mode_info.mode_config_initialized) {
4019 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4020 drm_helper_force_disable_all(adev_to_drm(adev));
4022 drm_atomic_helper_shutdown(adev_to_drm(adev));
4024 amdgpu_fence_driver_hw_fini(adev);
4026 if (adev->mman.initialized)
4027 drain_workqueue(adev->mman.bdev.wq);
4029 if (adev->pm.sysfs_initialized)
4030 amdgpu_pm_sysfs_fini(adev);
4031 if (adev->ucode_sysfs_en)
4032 amdgpu_ucode_sysfs_fini(adev);
4033 if (adev->psp_sysfs_en)
4034 amdgpu_psp_sysfs_fini(adev);
4035 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4037 /* disabling the ras feature must come before hw fini */
4038 amdgpu_ras_pre_fini(adev);
4040 amdgpu_device_ip_fini_early(adev);
4042 amdgpu_irq_fini_hw(adev);
4044 if (adev->mman.initialized)
4045 ttm_device_clear_dma_mappings(&adev->mman.bdev);
4047 amdgpu_gart_dummy_page_fini(adev);
4049 if (drm_dev_is_unplugged(adev_to_drm(adev)))
4050 amdgpu_device_unmap_mmio(adev);
4054 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4059 amdgpu_fence_driver_sw_fini(adev);
4060 amdgpu_device_ip_fini(adev);
4061 amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4062 adev->accel_working = false;
4063 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4065 amdgpu_reset_fini(adev);
4067 /* free i2c buses */
4068 if (!amdgpu_device_has_dc_support(adev))
4069 amdgpu_i2c_fini(adev);
4071 if (amdgpu_emu_mode != 1)
4072 amdgpu_atombios_fini(adev);
4077 px = amdgpu_device_supports_px(adev_to_drm(adev));
4079 if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
4080 apple_gmux_detect(NULL, NULL)))
4081 vga_switcheroo_unregister_client(adev->pdev);
4084 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4086 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4087 vga_client_unregister(adev->pdev);
4089 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4091 iounmap(adev->rmmio);
4093 amdgpu_device_doorbell_fini(adev);
4097 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4098 amdgpu_pmu_fini(adev);
4099 if (adev->mman.discovery_bin)
4100 amdgpu_discovery_fini(adev);
4102 amdgpu_reset_put_reset_domain(adev->reset_domain);
4103 adev->reset_domain = NULL;
4105 kfree(adev->pci_state);
4110 * amdgpu_device_evict_resources - evict device resources
4111 * @adev: amdgpu device object
4113 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4114 * of the vram memory type. Mainly used for evicting device resources
4115 * at suspend time.
4118 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4122 /* No need to evict vram on APUs for suspend to ram or s2idle: APU VRAM is carved out of system memory, which stays powered */
4123 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4126 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4128 DRM_WARN("evicting device resources failed\n");
4136 * amdgpu_device_suspend - initiate device suspend
4138 * @dev: drm dev pointer
4139 * @fbcon: notify the fbdev of suspend
4141 * Puts the hw in the suspend state (all asics).
4142 * Returns 0 for success or an error on failure.
4143 * Called at driver suspend.
4145 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4147 struct amdgpu_device *adev = drm_to_adev(dev);
4150 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4153 adev->in_suspend = true;
4155 /* Evict the majority of BOs before grabbing the full access */
4156 r = amdgpu_device_evict_resources(adev);
4160 if (amdgpu_sriov_vf(adev)) {
4161 amdgpu_virt_fini_data_exchange(adev);
4162 r = amdgpu_virt_request_full_gpu(adev, false);
4167 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4168 DRM_WARN("smart shift update failed\n");
4171 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4173 cancel_delayed_work_sync(&adev->delayed_init_work);
4175 amdgpu_ras_suspend(adev);
4177 amdgpu_device_ip_suspend_phase1(adev);
4180 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4182 r = amdgpu_device_evict_resources(adev);
4186 amdgpu_fence_driver_hw_fini(adev);
4188 amdgpu_device_ip_suspend_phase2(adev);
4190 if (amdgpu_sriov_vf(adev))
4191 amdgpu_virt_release_full_gpu(adev, false);
4197 * amdgpu_device_resume - initiate device resume
4199 * @dev: drm dev pointer
4200 * @fbcon: notify the fbdev of resume
4202 * Bring the hw back to operating state (all asics).
4203 * Returns 0 for success or an error on failure.
4204 * Called at driver resume.
4206 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4208 struct amdgpu_device *adev = drm_to_adev(dev);
4211 if (amdgpu_sriov_vf(adev)) {
4212 r = amdgpu_virt_request_full_gpu(adev, true);
4217 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4221 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4224 if (amdgpu_device_need_post(adev)) {
4225 r = amdgpu_device_asic_init(adev);
4227 dev_err(adev->dev, "amdgpu asic init failed\n");
4230 r = amdgpu_device_ip_resume(adev);
4233 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4236 amdgpu_fence_driver_hw_init(adev);
4238 r = amdgpu_device_ip_late_init(adev);
4242 queue_delayed_work(system_wq, &adev->delayed_init_work,
4243 msecs_to_jiffies(AMDGPU_RESUME_MS));
4245 if (!adev->in_s0ix) {
4246 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4252 if (amdgpu_sriov_vf(adev)) {
4253 amdgpu_virt_init_data_exchange(adev);
4254 amdgpu_virt_release_full_gpu(adev, true);
4260 /* Make sure IB tests flushed */
4261 flush_delayed_work(&adev->delayed_init_work);
4264 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4266 amdgpu_ras_resume(adev);
4268 if (adev->mode_info.num_crtc) {
4270 * Most of the connector probing functions try to acquire runtime pm
4271 * refs to ensure that the GPU is powered on when connector polling is
4272 * performed. Since we're calling this from a runtime PM callback,
4273 * trying to acquire rpm refs will cause us to deadlock.
4275 * Since we're guaranteed to be holding the rpm lock, it's safe to
4276 * temporarily disable the rpm helpers so this doesn't deadlock us.
4279 dev->dev->power.disable_depth++;
4281 if (!adev->dc_enabled)
4282 drm_helper_hpd_irq_event(dev);
4284 drm_kms_helper_hotplug_event(dev);
4286 dev->dev->power.disable_depth--;
4289 adev->in_suspend = false;
4291 if (adev->enable_mes)
4292 amdgpu_mes_self_test(adev);
4294 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4295 DRM_WARN("smart shift update failed\n");
4301 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4303 * @adev: amdgpu_device pointer
4305 * The list of all the hardware IPs that make up the asic is walked and
4306 * the check_soft_reset callbacks are run. check_soft_reset determines
4307 * if the asic is still hung or not.
4308 * Returns true if any of the IPs are still in a hung state, false if not.
4310 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4313 bool asic_hang = false;
4315 if (amdgpu_sriov_vf(adev))
4318 if (amdgpu_asic_need_full_reset(adev))
4321 for (i = 0; i < adev->num_ip_blocks; i++) {
4322 if (!adev->ip_blocks[i].status.valid)
4324 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4325 adev->ip_blocks[i].status.hang =
4326 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4327 if (adev->ip_blocks[i].status.hang) {
4328 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4336 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4338 * @adev: amdgpu_device pointer
4340 * The list of all the hardware IPs that make up the asic is walked and the
4341 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4342 * handles any IP specific hardware or software state changes that are
4343 * necessary for a soft reset to succeed.
4344 * Returns 0 on success, negative error code on failure.
4346 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4350 for (i = 0; i < adev->num_ip_blocks; i++) {
4351 if (!adev->ip_blocks[i].status.valid)
4353 if (adev->ip_blocks[i].status.hang &&
4354 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4355 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4365 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4367 * @adev: amdgpu_device pointer
4369 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4370 * reset is necessary to recover.
4371 * Returns true if a full asic reset is required, false if not.
4373 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4377 if (amdgpu_asic_need_full_reset(adev))
4380 for (i = 0; i < adev->num_ip_blocks; i++) {
4381 if (!adev->ip_blocks[i].status.valid)
4383 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4384 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4385 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4386 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4387 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4388 if (adev->ip_blocks[i].status.hang) {
4389 dev_info(adev->dev, "Some blocks need a full reset!\n");
4398 * amdgpu_device_ip_soft_reset - do a soft reset
4400 * @adev: amdgpu_device pointer
4402 * The list of all the hardware IPs that make up the asic is walked and the
4403 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4404 * IP specific hardware or software state changes that are necessary to soft
4405 * reset the IP.
4406 * Returns 0 on success, negative error code on failure.
4408 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4412 for (i = 0; i < adev->num_ip_blocks; i++) {
4413 if (!adev->ip_blocks[i].status.valid)
4415 if (adev->ip_blocks[i].status.hang &&
4416 adev->ip_blocks[i].version->funcs->soft_reset) {
4417 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4427 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4429 * @adev: amdgpu_device pointer
4431 * The list of all the hardware IPs that make up the asic is walked and the
4432 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4433 * handles any IP specific hardware or software state changes that are
4434 * necessary after the IP has been soft reset.
4435 * Returns 0 on success, negative error code on failure.
4437 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4441 for (i = 0; i < adev->num_ip_blocks; i++) {
4442 if (!adev->ip_blocks[i].status.valid)
4444 if (adev->ip_blocks[i].status.hang &&
4445 adev->ip_blocks[i].version->funcs->post_soft_reset)
4446 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4455 * amdgpu_device_recover_vram - Recover some VRAM contents
4457 * @adev: amdgpu_device pointer
4459 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4460 * restore things like GPUVM page tables after a GPU reset where
4461 * the contents of VRAM might be lost.
4464 * Returns 0 on success, negative error code on failure.
4466 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4468 struct dma_fence *fence = NULL, *next = NULL;
4469 struct amdgpu_bo *shadow;
4470 struct amdgpu_bo_vm *vmbo;
4473 if (amdgpu_sriov_runtime(adev))
4474 tmo = msecs_to_jiffies(8000);
4476 tmo = msecs_to_jiffies(100);
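/* The much longer SR-IOV runtime timeout is presumably to cover the VF
 * losing the GPU to other functions mid-restore; bare metal gets 100ms
 * per wait since the copies run uncontended.
 */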
4478 dev_info(adev->dev, "recover vram bo from shadow start\n");
4479 mutex_lock(&adev->shadow_list_lock);
4480 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4482 /* No need to recover an evicted BO */
4483 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4484 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4485 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4488 r = amdgpu_bo_restore_shadow(shadow, &next);
4493 tmo = dma_fence_wait_timeout(fence, false, tmo);
4494 dma_fence_put(fence);
4499 } else if (tmo < 0) {
4507 mutex_unlock(&adev->shadow_list_lock);
4510 tmo = dma_fence_wait_timeout(fence, false, tmo);
4511 dma_fence_put(fence);
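/* The restores are pipelined: inside the loop we only ever wait on the
 * previous restore's fence while the next copy is already queued, and
 * the last fence is waited on here after the loop, keeping the copy
 * engine busy instead of serializing restore/wait pairs.
 */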
4513 if (r < 0 || tmo <= 0) {
4514 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4518 dev_info(adev->dev, "recover vram bo from shadow done\n");
4524 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4526 * @adev: amdgpu_device pointer
4527 * @from_hypervisor: request from hypervisor
4529 * Do VF FLR and reinitialize the ASIC.
4530 * Returns 0 on success, negative error code on failure.
4532 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4533 bool from_hypervisor)
4536 struct amdgpu_hive_info *hive = NULL;
4537 int retry_limit = 0;
4540 amdgpu_amdkfd_pre_reset(adev);
4542 if (from_hypervisor)
4543 r = amdgpu_virt_request_full_gpu(adev, true);
4545 r = amdgpu_virt_reset_gpu(adev);
4549 /* Resume IP prior to SMC */
4550 r = amdgpu_device_ip_reinit_early_sriov(adev);
4554 amdgpu_virt_init_data_exchange(adev);
4556 r = amdgpu_device_fw_loading(adev);
4560 /* now we are okay to resume SMC/CP/SDMA */
4561 r = amdgpu_device_ip_reinit_late_sriov(adev);
4565 hive = amdgpu_get_xgmi_hive(adev);
4566 /* Update PSP FW topology after reset */
4567 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4568 r = amdgpu_xgmi_update_topology(hive, adev);
4571 amdgpu_put_xgmi_hive(hive);
4574 amdgpu_irq_gpu_reset_resume_helper(adev);
4575 r = amdgpu_ib_ring_tests(adev);
4577 amdgpu_amdkfd_post_reset(adev);
4581 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4582 amdgpu_inc_vram_lost(adev);
4583 r = amdgpu_device_recover_vram(adev);
4585 amdgpu_virt_release_full_gpu(adev, true);
4587 if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4588 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4592 DRM_ERROR("GPU reset retry is beyond the retry limit\n");
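/* Summary of the (partially elided) retry contract: errors matching
 * AMDGPU_RETRY_SRIOV_RESET() (-EBUSY, -ETIMEDOUT, -EINVAL; see the
 * macro defined near the top of the file) restart the whole VF FLR
 * sequence, bounded by AMDGPU_MAX_RETRY_LIMIT attempts.
 */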
4599 * amdgpu_device_has_job_running - check if there is any job in mirror list
4601 * @adev: amdgpu_device pointer
4603 * Check whether there is any job in the mirror list.
4605 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4608 struct drm_sched_job *job;
4610 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4611 struct amdgpu_ring *ring = adev->rings[i];
4613 if (!ring || !ring->sched.thread)
4616 spin_lock(&ring->sched.job_list_lock);
4617 job = list_first_entry_or_null(&ring->sched.pending_list,
4618 struct drm_sched_job, list);
4619 spin_unlock(&ring->sched.job_list_lock);
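/* If the pending list has a first entry, the ring still owns an
 * unfinished job; the (elided) return reports true in that case.
 */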
4627 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4629 * @adev: amdgpu_device pointer
4631 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4634 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4637 if (amdgpu_gpu_recovery == 0)
4640 /* Skip soft reset check in fatal error mode */
4641 if (!amdgpu_ras_is_poison_mode_supported(adev))
4644 if (amdgpu_sriov_vf(adev))
4647 if (amdgpu_gpu_recovery == -1) {
4648 switch (adev->asic_type) {
4649 #ifdef CONFIG_DRM_AMDGPU_SI
4656 #ifdef CONFIG_DRM_AMDGPU_CIK
4663 case CHIP_CYAN_SKILLFISH:
4673 dev_info(adev->dev, "GPU recovery disabled.\n");
4677 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4682 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4684 dev_info(adev->dev, "GPU mode1 reset\n");
4687 pci_clear_master(adev->pdev);
4689 amdgpu_device_cache_pci_state(adev->pdev);
4691 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4692 dev_info(adev->dev, "GPU smu mode1 reset\n");
4693 ret = amdgpu_dpm_mode1_reset(adev);
4695 dev_info(adev->dev, "GPU psp mode1 reset\n");
4696 ret = psp_gpu_reset(adev);
4700 dev_err(adev->dev, "GPU mode1 reset failed\n");
4702 amdgpu_device_load_pci_state(adev->pdev);
4704 /* wait for asic to come out of reset */
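/* (While the ASIC is held in reset, register reads return all ones, so
 * polling until memsize differs from 0xffffffff detects completion.)
 */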
4705 for (i = 0; i < adev->usec_timeout; i++) {
4706 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4708 if (memsize != 0xffffffff)
4713 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4717 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4718 struct amdgpu_reset_context *reset_context)
4721 struct amdgpu_job *job = NULL;
4722 bool need_full_reset =
4723 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4725 if (reset_context->reset_req_dev == adev)
4726 job = reset_context->job;
4728 if (amdgpu_sriov_vf(adev)) {
4729 /* stop the data exchange thread */
4730 amdgpu_virt_fini_data_exchange(adev);
4733 amdgpu_fence_driver_isr_toggle(adev, true);
4735 /* block all schedulers and reset given job's ring */
4736 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4737 struct amdgpu_ring *ring = adev->rings[i];
4739 if (!ring || !ring->sched.thread)
4742 /* Clear the job fences from the fence driver so that force_completion
4743 * doesn't signal them; only the NULL and vm flush fences are left in the fence drv */
4744 amdgpu_fence_driver_clear_job_fences(ring);
4746 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4747 amdgpu_fence_driver_force_completion(ring);
4750 amdgpu_fence_driver_isr_toggle(adev, false);
4753 drm_sched_increase_karma(&job->base);
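/* Increasing karma marks the job as a repeat offender; once karma
 * passes the scheduler's hang limit the owning context is flagged
 * guilty, so its jobs can be skipped on resubmission after the reset.
 */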
4755 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4756 /* If reset handler not implemented, continue; otherwise return */
4762 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4763 if (!amdgpu_sriov_vf(adev)) {
4765 if (!need_full_reset)
4766 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4768 if (!need_full_reset && amdgpu_gpu_recovery &&
4769 amdgpu_device_ip_check_soft_reset(adev)) {
4770 amdgpu_device_ip_pre_soft_reset(adev);
4771 r = amdgpu_device_ip_soft_reset(adev);
4772 amdgpu_device_ip_post_soft_reset(adev);
4773 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4774 dev_info(adev->dev, "soft reset failed, falling back to full reset!\n");
4775 need_full_reset = true;
4779 if (need_full_reset)
4780 r = amdgpu_device_ip_suspend(adev);
4781 if (need_full_reset)
4782 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4784 clear_bit(AMDGPU_NEED_FULL_RESET,
4785 &reset_context->flags);
4791 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4795 lockdep_assert_held(&adev->reset_domain->sem);
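/* The registers sampled here come from adev->reset_dump_reg_list,
 * which is user-populated (through debugfs in upstream kernels);
 * dumping them before the reset lets the pre-reset values survive
 * into the devcoredump below.
 */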
4797 for (i = 0; i < adev->num_regs; i++) {
4798 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4799 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4800 adev->reset_dump_reg_value[i]);
4806 #ifdef CONFIG_DEV_COREDUMP
4807 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4808 size_t count, void *data, size_t datalen)
4810 struct drm_printer p;
4811 struct amdgpu_device *adev = data;
4812 struct drm_print_iterator iter;
4817 iter.start = offset;
4818 iter.remain = count;
4820 p = drm_coredump_printer(&iter);
4822 drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4823 drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4824 drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4825 drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4826 if (adev->reset_task_info.pid)
4827 drm_printf(&p, "process_name: %s PID: %d\n",
4828 adev->reset_task_info.process_name,
4829 adev->reset_task_info.pid);
4831 if (adev->reset_vram_lost)
4832 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4833 if (adev->num_regs) {
4834 drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");
4836 for (i = 0; i < adev->num_regs; i++)
4837 drm_printf(&p, "0x%08x: 0x%08x\n",
4838 adev->reset_dump_reg_list[i],
4839 adev->reset_dump_reg_value[i]);
4842 return count - iter.remain;
4845 static void amdgpu_devcoredump_free(void *data)
4849 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4851 struct drm_device *dev = adev_to_drm(adev);
4853 ktime_get_ts64(&adev->reset_time);
4854 dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4855 amdgpu_devcoredump_read, amdgpu_devcoredump_free);
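/* dev_coredumpm() registers the dump with the devcoredump framework:
 * amdgpu_devcoredump_read() is invoked lazily when userspace reads the
 * devcd sysfs "data" file, and amdgpu_devcoredump_free() when the dump
 * is discarded.
 */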
4859 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4860 struct amdgpu_reset_context *reset_context)
4862 struct amdgpu_device *tmp_adev = NULL;
4863 bool need_full_reset, skip_hw_reset, vram_lost = false;
4865 bool gpu_reset_for_dev_remove = false;
4867 /* Try reset handler method first */
4868 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4870 amdgpu_reset_reg_dumps(tmp_adev);
4872 reset_context->reset_device_list = device_list_handle;
4873 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4874 /* If reset handler not implemented, continue; otherwise return */
4880 /* Reset handler not implemented, use the default method */
4882 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4883 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4885 gpu_reset_for_dev_remove =
4886 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4887 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4890 * ASIC reset has to be done on all XGMI hive nodes ASAP
4891 * to allow proper link negotiation in FW (within 1 sec)
4893 if (!skip_hw_reset && need_full_reset) {
4894 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4895 /* For XGMI run all resets in parallel to speed up the process */
4896 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4897 tmp_adev->gmc.xgmi.pending_reset = false;
4898 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4901 r = amdgpu_asic_reset(tmp_adev);
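/* For XGMI hives each node's reset is queued to system_unbound_wq so
 * that all nodes reset in parallel, which is needed to hit the FW link
 * negotiation window mentioned above; single-node devices just reset
 * synchronously here.
 */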
4904 dev_err(tmp_adev->dev, "ASIC reset failed with error %d for drm dev %s",
4905 r, adev_to_drm(tmp_adev)->unique);
4910 /* For XGMI wait for all resets to complete before proceed */
4912 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4913 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4914 flush_work(&tmp_adev->xgmi_reset_work);
4915 r = tmp_adev->asic_reset_res;
4923 if (!r && amdgpu_ras_intr_triggered()) {
4924 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4925 if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4926 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4927 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4930 amdgpu_ras_intr_cleared();
4933 /* Since the mode1 reset affects base ip blocks, the
4934 * phase1 ip blocks need to be resumed. Otherwise there
4935 * will be a BIOS signature error and the psp bootloader
4936 * can't load kdb on the next amdgpu install.
4938 if (gpu_reset_for_dev_remove) {
4939 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
4940 amdgpu_device_ip_resume_phase1(tmp_adev);
4945 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4946 if (need_full_reset) {
4948 r = amdgpu_device_asic_init(tmp_adev);
4950 dev_warn(tmp_adev->dev, "asic atom init failed!");
4952 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4953 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4957 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4961 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4962 #ifdef CONFIG_DEV_COREDUMP
4963 tmp_adev->reset_vram_lost = vram_lost;
4964 memset(&tmp_adev->reset_task_info, 0,
4965 sizeof(tmp_adev->reset_task_info));
4966 if (reset_context->job && reset_context->job->vm)
4967 tmp_adev->reset_task_info =
4968 reset_context->job->vm->task_info;
4969 amdgpu_reset_capture_coredumpm(tmp_adev);
4972 DRM_INFO("VRAM is lost due to GPU reset!\n");
4973 amdgpu_inc_vram_lost(tmp_adev);
4976 r = amdgpu_device_fw_loading(tmp_adev);
4980 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4985 amdgpu_device_fill_reset_magic(tmp_adev);
4988 * Add this ASIC back as tracked, since the reset already
4989 * completed successfully.
4991 amdgpu_register_gpu_instance(tmp_adev);
4993 if (!reset_context->hive &&
4994 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4995 amdgpu_xgmi_add_device(tmp_adev);
4997 r = amdgpu_device_ip_late_init(tmp_adev);
5001 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
5004 * The GPU enters a bad state once the number of faulty pages
5005 * detected by ECC reaches the threshold, and RAS
5006 * recovery is scheduled next. So add one check
5007 * here to break recovery if it indeed exceeds the
5008 * bad page threshold, and remind the user to
5009 * retire this GPU or set a bigger
5010 * bad_page_threshold value to fix this the next
5011 * time the driver is probed.
5013 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
5015 amdgpu_ras_resume(tmp_adev);
5021 /* Update PSP FW topology after reset */
5022 if (reset_context->hive &&
5023 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5024 r = amdgpu_xgmi_update_topology(
5025 reset_context->hive, tmp_adev);
5031 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5032 r = amdgpu_ib_ring_tests(tmp_adev);
5034 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5035 need_full_reset = true;
5042 r = amdgpu_device_recover_vram(tmp_adev);
5044 tmp_adev->asic_reset_res = r;
5048 if (need_full_reset)
5049 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5051 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5055 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5058 switch (amdgpu_asic_reset_method(adev)) {
5059 case AMD_RESET_METHOD_MODE1:
5060 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5062 case AMD_RESET_METHOD_MODE2:
5063 adev->mp1_state = PP_MP1_STATE_RESET;
5066 adev->mp1_state = PP_MP1_STATE_NONE;
5071 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5073 amdgpu_vf_error_trans_all(adev);
5074 adev->mp1_state = PP_MP1_STATE_NONE;
5077 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5079 struct pci_dev *p = NULL;
5081 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5082 adev->pdev->bus->number, 1);
5084 pm_runtime_enable(&(p->dev));
5085 pm_runtime_resume(&(p->dev));
5091 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5093 enum amd_reset_method reset_method;
5094 struct pci_dev *p = NULL;
5098 * For now, only BACO and mode1 reset are confirmed
5099 * to suffer from the audio issue if the audio device is not properly suspended.
5101 reset_method = amdgpu_asic_reset_method(adev);
5102 if ((reset_method != AMD_RESET_METHOD_BACO) &&
5103 (reset_method != AMD_RESET_METHOD_MODE1))
5106 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5107 adev->pdev->bus->number, 1);
5111 expires = pm_runtime_autosuspend_expiration(&(p->dev));
5114 * If we cannot get the audio device autosuspend delay,
5115 * a fixed 4S interval will be used. Since 3S is
5116 * the audio controller's default autosuspend delay,
5117 * the 4S used here is guaranteed to cover it.
5119 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5121 while (!pm_runtime_status_suspended(&(p->dev))) {
5122 if (!pm_runtime_suspend(&(p->dev)))
5125 if (expires < ktime_get_mono_fast_ns()) {
5126 dev_warn(adev->dev, "failed to suspend display audio\n");
5128 /* TODO: abort the succeeding gpu reset? */
5133 pm_runtime_disable(&(p->dev));
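/* Runtime PM is disabled once the codec is suspended so it cannot
 * autoresume behind our back mid-reset; the counterpart
 * amdgpu_device_resume_display_audio() above re-enables and resumes it
 * after the reset completes.
 */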
5139 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5141 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5143 #if defined(CONFIG_DEBUG_FS)
5144 if (!amdgpu_sriov_vf(adev))
5145 cancel_work(&adev->reset_work);
5149 cancel_work(&adev->kfd.reset_work);
5151 if (amdgpu_sriov_vf(adev))
5152 cancel_work(&adev->virt.flr_work);
5154 if (con && adev->ras_enabled)
5155 cancel_work(&con->recovery_work);
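/* Only non-scheduler reset work items are cancelled here; scheduler
 * driven resets were already parked by drm_sched_stop() in the caller.
 */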
5160 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5162 * @adev: amdgpu_device pointer
5163 * @job: the job which triggered the hang
5165 * Attempt to reset the GPU if it has hung (all asics).
5166 * Attempt to do soft-reset or full-reset and reinitialize the ASIC.
5167 * Returns 0 for success or an error on failure.
5170 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5171 struct amdgpu_job *job,
5172 struct amdgpu_reset_context *reset_context)
5174 struct list_head device_list, *device_list_handle = NULL;
5175 bool job_signaled = false;
5176 struct amdgpu_hive_info *hive = NULL;
5177 struct amdgpu_device *tmp_adev = NULL;
5179 bool need_emergency_restart = false;
5180 bool audio_suspended = false;
5181 bool gpu_reset_for_dev_remove = false;
5183 gpu_reset_for_dev_remove =
5184 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5185 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5188 * Special case: RAS triggered and full reset isn't supported
5190 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5193 * Flush RAM to disk so that after reboot
5194 * the user can read the log and see why the system rebooted.
5196 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5197 DRM_WARN("Emergency reboot.");
5200 emergency_restart();
5203 dev_info(adev->dev, "GPU %s begin!\n",
5204 need_emergency_restart ? "jobs stop":"reset");
5206 if (!amdgpu_sriov_vf(adev))
5207 hive = amdgpu_get_xgmi_hive(adev);
5209 mutex_lock(&hive->hive_lock);
5211 reset_context->job = job;
5212 reset_context->hive = hive;
5214 * Build list of devices to reset.
5215 * In case we are in XGMI hive mode, re-sort the device list
5216 * to put adev in the 1st position.
5218 INIT_LIST_HEAD(&device_list);
5219 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5220 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5221 list_add_tail(&tmp_adev->reset_list, &device_list);
5222 if (gpu_reset_for_dev_remove && adev->shutdown)
5223 tmp_adev->shutdown = true;
5225 if (!list_is_first(&adev->reset_list, &device_list))
5226 list_rotate_to_front(&adev->reset_list, &device_list);
5227 device_list_handle = &device_list;
5229 list_add_tail(&adev->reset_list, &device_list);
5230 device_list_handle = &device_list;
5233 /* We need to lock reset domain only once both for XGMI and single device */
5234 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5236 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5238 /* block all schedulers and reset given job's ring */
5239 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5241 amdgpu_device_set_mp1_state(tmp_adev);
5244 * Try to put the audio codec into suspend state
5245 * before gpu reset started.
5247 * Because the power domain of the graphics device
5248 * is shared with the AZ power domain, without this
5249 * we may change the audio hardware from behind
5250 * the audio driver's back and trigger
5251 * some audio codec errors.
5253 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5254 audio_suspended = true;
5256 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5258 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5260 if (!amdgpu_sriov_vf(tmp_adev))
5261 amdgpu_amdkfd_pre_reset(tmp_adev);
5264 * Mark these ASICs to be reset as untracked first,
5265 * and add them back after the reset completes.
5267 amdgpu_unregister_gpu_instance(tmp_adev);
5269 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5271 /* disable ras on ALL IPs */
5272 if (!need_emergency_restart &&
5273 amdgpu_device_ip_need_full_reset(tmp_adev))
5274 amdgpu_ras_suspend(tmp_adev);
5276 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5277 struct amdgpu_ring *ring = tmp_adev->rings[i];
5279 if (!ring || !ring->sched.thread)
5282 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5284 if (need_emergency_restart)
5285 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5287 atomic_inc(&tmp_adev->gpu_reset_counter);
5290 if (need_emergency_restart)
5291 goto skip_sched_resume;
5294 * Must check guilty signal here since after this point all old
5295 * HW fences are force signaled.
5297 * job->base holds a reference to parent fence
5299 if (job && dma_fence_is_signaled(&job->hw_fence)) {
5300 job_signaled = true;
5301 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5305 retry: /* Rest of adevs pre asic reset from XGMI hive. */
5306 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5307 if (gpu_reset_for_dev_remove) {
5308 /* Workaround for ASICs that need to disable SMC first */
5309 amdgpu_device_smu_fini_early(tmp_adev);
5311 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5312 /* TODO: should we stop? */
5314 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err %d for drm dev %s ",
5315 r, adev_to_drm(tmp_adev)->unique);
5316 tmp_adev->asic_reset_res = r;
5320 * Drop all pending non-scheduler resets. Scheduler resets
5321 * were already dropped during drm_sched_stop
5323 amdgpu_device_stop_pending_resets(tmp_adev);
5326 /* Actual ASIC resets if needed. */
5327 /* Host driver will handle XGMI hive reset for SRIOV */
5328 if (amdgpu_sriov_vf(adev)) {
5329 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5331 adev->asic_reset_res = r;
5333 /* Aldebaran supports RAS in SRIOV, so we need to resume RAS during reset */
5334 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5335 amdgpu_ras_resume(adev);
5337 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
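/* amdgpu_do_asic_reset() returns -EAGAIN e.g. when the post-reset IB
 * ring tests fail with need_full_reset set; only -EAGAIN loops back to
 * the retry label above for another full attempt.
 */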
5338 if (r == -EAGAIN)
5341 if (!r && gpu_reset_for_dev_remove)
5347 /* Post ASIC reset for all devs. */
5348 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5350 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5351 struct amdgpu_ring *ring = tmp_adev->rings[i];
5353 if (!ring || !ring->sched.thread)
5356 drm_sched_start(&ring->sched, true);
5359 if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
5360 amdgpu_mes_self_test(tmp_adev);
5362 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5363 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5366 if (tmp_adev->asic_reset_res)
5367 r = tmp_adev->asic_reset_res;
5369 tmp_adev->asic_reset_res = 0;
5372 /* bad news, how do we tell it to userspace? */
5373 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5374 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5376 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5377 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5378 DRM_WARN("smart shift update failed\n");
5383 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5384 /* unlock kfd: SRIOV would do it separately */
5385 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5386 amdgpu_amdkfd_post_reset(tmp_adev);
5388 /* kfd_post_reset will do nothing if the kfd device is not initialized,
5389 * so we need to bring up kfd here if it was not initialized before
5391 if (!adev->kfd.init_complete)
5392 amdgpu_amdkfd_device_init(adev);
5394 if (audio_suspended)
5395 amdgpu_device_resume_display_audio(tmp_adev);
5397 amdgpu_device_unset_mp1_state(tmp_adev);
5399 amdgpu_ras_set_error_query_ready(tmp_adev, true);
5403 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5405 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5408 mutex_unlock(&hive->hive_lock);
5409 amdgpu_put_xgmi_hive(hive);
5413 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5415 atomic_set(&adev->reset_domain->reset_res, r);
5420 * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
5422 * @adev: amdgpu_device pointer
5424 * Fetches and stores in the driver the PCIe capabilities (gen speed
5425 * and lanes) of the slot the device is in. Handles APUs and
5426 * virtualized environments where PCIe config space may not be available.
5428 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5430 struct pci_dev *pdev;
5431 enum pci_bus_speed speed_cap, platform_speed_cap;
5432 enum pcie_link_width platform_link_width;
5434 if (amdgpu_pcie_gen_cap)
5435 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5437 if (amdgpu_pcie_lane_cap)
5438 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5440 /* covers APUs as well */
5441 if (pci_is_root_bus(adev->pdev->bus)) {
5442 if (adev->pm.pcie_gen_mask == 0)
5443 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5444 if (adev->pm.pcie_mlw_mask == 0)
5445 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5449 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5452 pcie_bandwidth_available(adev->pdev, NULL,
5453 &platform_speed_cap, &platform_link_width);
5455 if (adev->pm.pcie_gen_mask == 0) {
5458 speed_cap = pcie_get_speed_cap(pdev);
5459 if (speed_cap == PCI_SPEED_UNKNOWN) {
5460 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5461 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5462 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5464 if (speed_cap == PCIE_SPEED_32_0GT)
5465 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5466 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5467 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5468 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5469 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5470 else if (speed_cap == PCIE_SPEED_16_0GT)
5471 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5472 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5473 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5474 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5475 else if (speed_cap == PCIE_SPEED_8_0GT)
5476 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5477 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5478 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5479 else if (speed_cap == PCIE_SPEED_5_0GT)
5480 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5481 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5483 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
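/* The CAIL masks are cumulative: a device capable of a given gen also
 * advertises every lower gen, so the power-management code may choose
 * any supported link speed up to the cap.
 */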
5486 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5487 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5488 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5490 if (platform_speed_cap == PCIE_SPEED_32_0GT)
5491 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5492 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5493 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5494 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5495 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5496 else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5497 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5498 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5499 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5500 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5501 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5502 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5503 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5504 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5505 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5506 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5507 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5509 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5513 if (adev->pm.pcie_mlw_mask == 0) {
5514 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5515 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5517 switch (platform_link_width) {
5519 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5520 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5521 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5522 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5523 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5524 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5525 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5528 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5529 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5530 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5531 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5532 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5533 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5536 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5537 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5538 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5539 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5540 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5543 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5544 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5545 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5546 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5549 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5550 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5551 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5554 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5555 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5558 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5568 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5570 * @adev: amdgpu_device pointer
5571 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5573 * Return true if @peer_adev can access (DMA) @adev through the PCIe
5574 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5575 * the peer device.
5577 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5578 struct amdgpu_device *peer_adev)
5580 #ifdef CONFIG_HSA_AMD_P2P
5581 uint64_t address_mask = peer_adev->dev->dma_mask ?
5582 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5583 resource_size_t aper_limit =
5584 adev->gmc.aper_base + adev->gmc.aper_size - 1;
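/* If the peer exposes a DMA mask, any aperture address with bits above
 * the mask is unreachable for it; with no mask we conservatively assume
 * 32-bit addressing, hence the ~((1ULL << 32) - 1) fallback.
 */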
5586 !adev->gmc.xgmi.connected_to_cpu &&
5587 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5589 return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5590 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5591 !(adev->gmc.aper_base & address_mask ||
5592 aper_limit & address_mask));
5598 int amdgpu_device_baco_enter(struct drm_device *dev)
5600 struct amdgpu_device *adev = drm_to_adev(dev);
5601 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5603 if (!amdgpu_device_supports_baco(dev))
5606 if (ras && adev->ras_enabled &&
5607 adev->nbio.funcs->enable_doorbell_interrupt)
5608 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5610 return amdgpu_dpm_baco_enter(adev);
5613 int amdgpu_device_baco_exit(struct drm_device *dev)
5615 struct amdgpu_device *adev = drm_to_adev(dev);
5616 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5619 if (!amdgpu_device_supports_baco(dev))
5622 ret = amdgpu_dpm_baco_exit(adev);
5626 if (ras && adev->ras_enabled &&
5627 adev->nbio.funcs->enable_doorbell_interrupt)
5628 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5630 if (amdgpu_passthrough(adev) &&
5631 adev->nbio.funcs->clear_doorbell_interrupt)
5632 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5638 * amdgpu_pci_error_detected - Called when a PCI error is detected.
5639 * @pdev: PCI device struct
5640 * @state: PCI channel state
5642 * Description: Called when a PCI error is detected.
5644 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5646 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5648 struct drm_device *dev = pci_get_drvdata(pdev);
5649 struct amdgpu_device *adev = drm_to_adev(dev);
5652 DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5654 if (adev->gmc.xgmi.num_physical_nodes > 1) {
5655 DRM_WARN("No support for XGMI hive yet...");
5656 return PCI_ERS_RESULT_DISCONNECT;
5659 adev->pci_channel_state = state;
5662 case pci_channel_io_normal:
5663 return PCI_ERS_RESULT_CAN_RECOVER;
5664 /* Fatal error, prepare for slot reset */
5665 case pci_channel_io_frozen:
5667 * Locking adev->reset_domain->sem will prevent any external access
5668 * to GPU during PCI error recovery
5670 amdgpu_device_lock_reset_domain(adev->reset_domain);
5671 amdgpu_device_set_mp1_state(adev);
5674 * Block any work scheduling as we do for regular GPU reset
5675 * for the duration of the recovery
5677 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5678 struct amdgpu_ring *ring = adev->rings[i];
5680 if (!ring || !ring->sched.thread)
5683 drm_sched_stop(&ring->sched, NULL);
5685 atomic_inc(&adev->gpu_reset_counter);
5686 return PCI_ERS_RESULT_NEED_RESET;
5687 case pci_channel_io_perm_failure:
5688 /* Permanent error, prepare for device removal */
5689 return PCI_ERS_RESULT_DISCONNECT;
5692 return PCI_ERS_RESULT_NEED_RESET;
5696 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5697 * @pdev: pointer to PCI device
5699 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5702 DRM_INFO("PCI error: mmio enabled callback!!\n");
5704 /* TODO - dump whatever for debugging purposes */
5706 /* This is called only if amdgpu_pci_error_detected returns
5707 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5708 * works, no need to reset slot.
5711 return PCI_ERS_RESULT_RECOVERED;
5715 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5716 * @pdev: PCI device struct
5718 * Description: This routine is called by the pci error recovery
5719 * code after the PCI slot has been reset, just before we
5720 * should resume normal operations.
5722 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5724 struct drm_device *dev = pci_get_drvdata(pdev);
5725 struct amdgpu_device *adev = drm_to_adev(dev);
5727 struct amdgpu_reset_context reset_context;
5729 struct list_head device_list;
5731 DRM_INFO("PCI error: slot reset callback!!\n");
5733 memset(&reset_context, 0, sizeof(reset_context));
5735 INIT_LIST_HEAD(&device_list);
5736 list_add_tail(&adev->reset_list, &device_list);
5738 /* wait for asic to come out of reset */
5741 /* Restore PCI confspace */
5742 amdgpu_device_load_pci_state(pdev);
5744 /* confirm ASIC came out of reset */
5745 for (i = 0; i < adev->usec_timeout; i++) {
5746 memsize = amdgpu_asic_get_config_memsize(adev);
5748 if (memsize != 0xffffffff)
5752 if (memsize == 0xffffffff) {
5757 reset_context.method = AMD_RESET_METHOD_NONE;
5758 reset_context.reset_req_dev = adev;
5759 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5760 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
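/* The PCI core already reset the slot before this callback, so the
 * recovery below skips the ASIC reset itself (AMDGPU_SKIP_HW_RESET)
 * and only re-initializes the IP blocks (AMDGPU_NEED_FULL_RESET).
 */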
5762 adev->no_hw_access = true;
5763 r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5764 adev->no_hw_access = false;
5768 r = amdgpu_do_asic_reset(&device_list, &reset_context);
5772 if (amdgpu_device_cache_pci_state(adev->pdev))
5773 pci_restore_state(adev->pdev);
5775 DRM_INFO("PCIe error recovery succeeded\n");
5777 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5778 amdgpu_device_unset_mp1_state(adev);
5779 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5782 return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5786 * amdgpu_pci_resume() - resume normal ops after PCI reset
5787 * @pdev: pointer to PCI device
5789 * Called when the error recovery driver tells us that it's
5790 * OK to resume normal operation.
5792 void amdgpu_pci_resume(struct pci_dev *pdev)
5794 struct drm_device *dev = pci_get_drvdata(pdev);
5795 struct amdgpu_device *adev = drm_to_adev(dev);
5799 DRM_INFO("PCI error: resume callback!!\n");
5801 /* Only continue execution for the case of pci_channel_io_frozen */
5802 if (adev->pci_channel_state != pci_channel_io_frozen)
5805 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5806 struct amdgpu_ring *ring = adev->rings[i];
5808 if (!ring || !ring->sched.thread)
5811 drm_sched_start(&ring->sched, true);
5814 amdgpu_device_unset_mp1_state(adev);
5815 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5818 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5820 struct drm_device *dev = pci_get_drvdata(pdev);
5821 struct amdgpu_device *adev = drm_to_adev(dev);
5824 r = pci_save_state(pdev);
5826 kfree(adev->pci_state);
5828 adev->pci_state = pci_store_saved_state(pdev);
5830 if (!adev->pci_state) {
5831 DRM_ERROR("Failed to store PCI saved state");
5835 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5842 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5844 struct drm_device *dev = pci_get_drvdata(pdev);
5845 struct amdgpu_device *adev = drm_to_adev(dev);
5848 if (!adev->pci_state)
5851 r = pci_load_saved_state(pdev, adev->pci_state);
5854 pci_restore_state(pdev);
5856 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5863 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5864 struct amdgpu_ring *ring)
5866 #ifdef CONFIG_X86_64
5867 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5870 if (adev->gmc.xgmi.connected_to_cpu)
5873 if (ring && ring->funcs->emit_hdp_flush)
5874 amdgpu_ring_emit_hdp_flush(ring);
5876 amdgpu_asic_flush_hdp(adev, ring);
5879 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5880 struct amdgpu_ring *ring)
5882 #ifdef CONFIG_X86_64
5883 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5886 if (adev->gmc.xgmi.connected_to_cpu)
5889 amdgpu_asic_invalidate_hdp(adev, ring);
5892 int amdgpu_in_reset(struct amdgpu_device *adev)
5894 return atomic_read(&adev->reset_domain->in_gpu_reset);
5898 * amdgpu_device_halt() - bring hardware to some kind of halt state
5900 * @adev: amdgpu_device pointer
5902 * Bring hardware to some kind of halt state so that no one can touch it
5903 * any more. It helps to maintain the error context when an error occurs.
5904 * Compared to a simple hang, the system will stay stable at least for SSH
5905 * access. Then it should be trivial to inspect the hardware state and
5906 * see what's going on. Implemented as follows:
5908 * 1. drm_dev_unplug() makes device inaccessible to user space(IOCTLs, etc),
5909 * clears all CPU mappings to device, disallows remappings through page faults
5910 * 2. amdgpu_irq_disable_all() disables all interrupts
5911 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5912 * 4. set adev->no_hw_access to avoid potential crashes after step 5
5913 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5914 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5915 * flush any in flight DMA operations
5917 void amdgpu_device_halt(struct amdgpu_device *adev)
5919 struct pci_dev *pdev = adev->pdev;
5920 struct drm_device *ddev = adev_to_drm(adev);
5922 drm_dev_unplug(ddev);
5924 amdgpu_irq_disable_all(adev);
5926 amdgpu_fence_driver_hw_fini(adev);
5928 adev->no_hw_access = true;
5930 amdgpu_device_unmap_mmio(adev);
5932 pci_disable_device(pdev);
5933 pci_wait_for_pending_transaction(pdev);
5936 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5939 unsigned long flags, address, data;
5942 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5943 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5945 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5946 WREG32(address, reg * 4);
5947 (void)RREG32(address);
5949 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5953 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5956 unsigned long flags, address, data;
5958 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5959 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5961 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5962 WREG32(address, reg * 4);
5963 (void)RREG32(address);
5966 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
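/* Both port accessors use the classic index/data pattern: write the
 * dword index (hence reg * 4) to the NBIO index offset, read it back
 * once (presumably to post the write across PCIe), then move the
 * payload through the data offset, all under pcie_idx_lock.
 */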
5970 * amdgpu_device_switch_gang - switch to a new gang
5971 * @adev: amdgpu_device pointer
5972 * @gang: the gang to switch to
5974 * Try to switch to a new gang.
5975 * Returns: NULL if we switched to the new gang or a reference to the current
5976 * gang leader.
5978 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
5979 struct dma_fence *gang)
5981 struct dma_fence *old = NULL;
5986 old = dma_fence_get_rcu_safe(&adev->gang_submit);
5992 if (!dma_fence_is_signaled(old))
5995 } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
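/* Lockless gang switch: the new fence is only installed once the
 * current gang_submit fence has signaled, and the cmpxchg() loop
 * retries the RCU-safe read when another thread races the swap.
 */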
6002 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6004 switch (adev->asic_type) {
6005 #ifdef CONFIG_DRM_AMDGPU_SI
6009 /* chips with no display hardware */
6011 #ifdef CONFIG_DRM_AMDGPU_SI
6017 #ifdef CONFIG_DRM_AMDGPU_CIK
6026 case CHIP_POLARIS10:
6027 case CHIP_POLARIS11:
6028 case CHIP_POLARIS12:
6032 /* chips with display hardware */
6036 if (!adev->ip_versions[DCE_HWIP][0] ||
6037 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))