2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_probe_helper.h>
38 #include <drm/amdgpu_drm.h>
39 #include <linux/vgaarb.h>
40 #include <linux/vga_switcheroo.h>
41 #include <linux/efi.h>
43 #include "amdgpu_trace.h"
44 #include "amdgpu_i2c.h"
46 #include "amdgpu_atombios.h"
47 #include "amdgpu_atomfirmware.h"
49 #ifdef CONFIG_DRM_AMDGPU_SI
52 #ifdef CONFIG_DRM_AMDGPU_CIK
58 #include "bif/bif_4_1_d.h"
59 #include <linux/firmware.h>
60 #include "amdgpu_vf_error.h"
62 #include "amdgpu_amdkfd.h"
63 #include "amdgpu_pm.h"
65 #include "amdgpu_xgmi.h"
66 #include "amdgpu_ras.h"
67 #include "amdgpu_pmu.h"
68 #include "amdgpu_fru_eeprom.h"
69 #include "amdgpu_reset.h"
71 #include <linux/suspend.h>
72 #include <drm/task_barrier.h>
73 #include <linux/pm_runtime.h>
75 #include <drm/drm_drv.h>
77 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
85 #define AMDGPU_RESUME_MS 2000
86 #define AMDGPU_MAX_RETRY_LIMIT 2
87 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
89 const char *amdgpu_asic_name[] = {
131 * DOC: pcie_replay_count
133 * The amdgpu driver provides a sysfs API for reporting the total number
134 * of PCIe replays (NAKs).
135 * The file pcie_replay_count is used for this and returns the total
136 * number of replays as a sum of the NAKs generated and NAKs received
139 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
140 struct device_attribute *attr, char *buf)
142 struct drm_device *ddev = dev_get_drvdata(dev);
143 struct amdgpu_device *adev = drm_to_adev(ddev);
144 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
146 return sysfs_emit(buf, "%llu\n", cnt);
149 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
150 amdgpu_device_get_pcie_replay_count, NULL);
152 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
157 * The amdgpu driver provides a sysfs API for reporting the product name
159 * The file product_name is used for this and returns the product name
160 * as returned from the FRU.
161 * NOTE: This is only available for certain server cards
164 static ssize_t amdgpu_device_get_product_name(struct device *dev,
165 struct device_attribute *attr, char *buf)
167 struct drm_device *ddev = dev_get_drvdata(dev);
168 struct amdgpu_device *adev = drm_to_adev(ddev);
170 return sysfs_emit(buf, "%s\n", adev->product_name);
173 static DEVICE_ATTR(product_name, S_IRUGO,
174 amdgpu_device_get_product_name, NULL);
177 * DOC: product_number
179 * The amdgpu driver provides a sysfs API for reporting the part number
181 * The file product_number is used for this and returns the part number
182 * as returned from the FRU.
183 * NOTE: This is only available for certain server cards
186 static ssize_t amdgpu_device_get_product_number(struct device *dev,
187 struct device_attribute *attr, char *buf)
189 struct drm_device *ddev = dev_get_drvdata(dev);
190 struct amdgpu_device *adev = drm_to_adev(ddev);
192 return sysfs_emit(buf, "%s\n", adev->product_number);
195 static DEVICE_ATTR(product_number, S_IRUGO,
196 amdgpu_device_get_product_number, NULL);
201 * The amdgpu driver provides a sysfs API for reporting the serial number
203 * The file serial_number is used for this and returns the serial number
204 * as returned from the FRU.
205 * NOTE: This is only available for certain server cards
208 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
209 struct device_attribute *attr, char *buf)
211 struct drm_device *ddev = dev_get_drvdata(dev);
212 struct amdgpu_device *adev = drm_to_adev(ddev);
214 return sysfs_emit(buf, "%s\n", adev->serial);
217 static DEVICE_ATTR(serial_number, S_IRUGO,
218 amdgpu_device_get_serial_number, NULL);
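/*
 * Illustrative sketch (not code from this file): attributes declared with
 * DEVICE_ATTR() above are typically exposed during device init and removed
 * again on teardown, e.g.:
 *
 *	if (device_create_file(adev->dev, &dev_attr_serial_number))
 *		dev_err(adev->dev, "Could not create serial_number sysfs file\n");
 *	...
 *	device_remove_file(adev->dev, &dev_attr_serial_number);
 */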
221 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
223 * @dev: drm_device pointer
225 * Returns true if the device is a dGPU with ATPX power control,
226 * otherwise returns false.
228 bool amdgpu_device_supports_px(struct drm_device *dev)
230 struct amdgpu_device *adev = drm_to_adev(dev);
232 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
238 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
240 * @dev: drm_device pointer
242 * Returns true if the device is a dGPU with ACPI power control,
243 * otherwise returns false.
245 bool amdgpu_device_supports_boco(struct drm_device *dev)
247 struct amdgpu_device *adev = drm_to_adev(dev);
250 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
256 * amdgpu_device_supports_baco - Does the device support BACO
258 * @dev: drm_device pointer
260 * Returns true if the device supports BACO,
261 * otherwise returns false.
263 bool amdgpu_device_supports_baco(struct drm_device *dev)
265 struct amdgpu_device *adev = drm_to_adev(dev);
267 return amdgpu_asic_supports_baco(adev);
271 * amdgpu_device_supports_smart_shift - Is the device dGPU with
272 * smart shift support
274 * @dev: drm_device pointer
276 * Returns true if the device is a dGPU with Smart Shift support,
277 * otherwise returns false.
279 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
281 return (amdgpu_device_supports_boco(dev) &&
282 amdgpu_acpi_is_power_shift_control_supported());
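/*
 * Illustrative sketch (an assumption, not code from this file): the
 * supports_px/boco/baco helpers above let runtime PM setup pick a
 * power-off strategy, conceptually:
 *
 *	if (amdgpu_device_supports_px(dev))
 *		rpm_mode = AMDGPU_RUNPM_PX;    platform (ATPX) power control
 *	else if (amdgpu_device_supports_boco(dev))
 *		rpm_mode = AMDGPU_RUNPM_BOCO;  ACPI power resources
 *	else if (amdgpu_device_supports_baco(dev))
 *		rpm_mode = AMDGPU_RUNPM_BACO;  bus active, chip off
 *
 * The AMDGPU_RUNPM_* names are placeholders for illustration.
 */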
286 * VRAM access helper functions
290 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
292 * @adev: amdgpu_device pointer
293 * @pos: offset of the buffer in vram
294 * @buf: virtual address of the buffer in system memory
295 * @size: read/write size, the size of @buf must be >= @size
296 * @write: true - write to vram, otherwise - read from vram
298 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
299 void *buf, size_t size, bool write)
302 uint32_t hi = ~0, tmp = 0;
303 uint32_t *data = buf;
307 if (!drm_dev_enter(adev_to_drm(adev), &idx))
310 BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
312 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
313 for (last = pos + size; pos < last; pos += 4) {
316 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
318 WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
322 WREG32_NO_KIQ(mmMM_DATA, *data++);
324 *data++ = RREG32_NO_KIQ(mmMM_DATA);
327 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
332 * amdgpu_device_aper_access - access vram by the vram aperture
334 * @adev: amdgpu_device pointer
335 * @pos: offset of the buffer in vram
336 * @buf: virtual address of the buffer in system memory
337 * @size: read/write size, the size of @buf must be >= @size
338 * @write: true - write to vram, otherwise - read from vram
340 * Returns the number of bytes transferred.
342 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
343 void *buf, size_t size, bool write)
350 if (!adev->mman.aper_base_kaddr)
353 last = min(pos + size, adev->gmc.visible_vram_size);
355 addr = adev->mman.aper_base_kaddr + pos;
359 memcpy_toio(addr, buf, count);
361 amdgpu_device_flush_hdp(adev, NULL);
363 amdgpu_device_invalidate_hdp(adev, NULL);
365 memcpy_fromio(buf, addr, count);
377 * amdgpu_device_vram_access - read/write a buffer in vram
379 * @adev: amdgpu_device pointer
380 * @pos: offset of the buffer in vram
381 * @buf: virtual address of the buffer in system memory
382 * @size: read/write size, the size of @buf must be >= @size
383 * @write: true - write to vram, otherwise - read from vram
385 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
386 void *buf, size_t size, bool write)
390 /* try using the vram aperture to access vram first */
391 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
394 /* use MM to access the rest of vram */
397 amdgpu_device_mm_access(adev, pos, buf, size, write);
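/*
 * Illustrative usage sketch (assumption): reading and writing a dword in
 * VRAM with the helper above, e.g. from a debugfs handler. Note that @pos
 * and @size must be 4-byte aligned for the MM_INDEX/MM_DATA fallback path.
 *
 *	uint32_t value;
 *
 *	amdgpu_device_vram_access(adev, pos, &value, sizeof(value), false);
 *	value |= 0x1;
 *	amdgpu_device_vram_access(adev, pos, &value, sizeof(value), true);
 */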
402 * register access helper functions.
405 /* Check if hw access should be skipped because of hotplug or device error */
406 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
408 if (adev->no_hw_access)
411 #ifdef CONFIG_LOCKDEP
413 * This is a bit complicated to understand, so worth a comment. What we assert
414 * here is that the GPU reset is not running on another thread in parallel.
416 * For this we trylock the read side of the reset semaphore; if that succeeds
417 * we know that the reset is not running in parallel.
419 * If the trylock fails we assert that we are either already holding the read
420 * side of the lock or are the reset thread itself and hold the write side of
424 if (down_read_trylock(&adev->reset_domain->sem))
425 up_read(&adev->reset_domain->sem);
427 lockdep_assert_held(&adev->reset_domain->sem);
434 * amdgpu_device_rreg - read a memory mapped IO or indirect register
436 * @adev: amdgpu_device pointer
437 * @reg: dword aligned register offset
438 * @acc_flags: access flags which require special behavior
440 * Returns the 32 bit value from the offset specified.
442 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
443 uint32_t reg, uint32_t acc_flags)
447 if (amdgpu_device_skip_hw_access(adev))
450 if ((reg * 4) < adev->rmmio_size) {
451 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
452 amdgpu_sriov_runtime(adev) &&
453 down_read_trylock(&adev->reset_domain->sem)) {
454 ret = amdgpu_kiq_rreg(adev, reg);
455 up_read(&adev->reset_domain->sem);
457 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
460 ret = adev->pcie_rreg(adev, reg * 4);
463 trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
469 * MMIO register read helper functions (byte access)
470 * @offset: byte offset from MMIO start
475 * amdgpu_mm_rreg8 - read a memory mapped IO register
477 * @adev: amdgpu_device pointer
478 * @offset: byte aligned register offset
480 * Returns the 8 bit value from the offset specified.
482 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
484 if (amdgpu_device_skip_hw_access(adev))
487 if (offset < adev->rmmio_size)
488 return (readb(adev->rmmio + offset));
493 * MMIO register write helper functions (byte access)
494 * @offset: byte offset from MMIO start
495 * @value: the value to be written to the register
499 * amdgpu_mm_wreg8 - write a memory mapped IO register
501 * @adev: amdgpu_device pointer
502 * @offset: byte aligned register offset
503 * @value: 8 bit value to write
505 * Writes the value specified to the offset specified.
507 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
509 if (amdgpu_device_skip_hw_access(adev))
512 if (offset < adev->rmmio_size)
513 writeb(value, adev->rmmio + offset);
519 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
521 * @adev: amdgpu_device pointer
522 * @reg: dword aligned register offset
523 * @v: 32 bit value to write to the register
524 * @acc_flags: access flags which require special behavior
526 * Writes the value specified to the offset specified.
528 void amdgpu_device_wreg(struct amdgpu_device *adev,
529 uint32_t reg, uint32_t v,
532 if (amdgpu_device_skip_hw_access(adev))
535 if ((reg * 4) < adev->rmmio_size) {
536 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
537 amdgpu_sriov_runtime(adev) &&
538 down_read_trylock(&adev->reset_domain->sem)) {
539 amdgpu_kiq_wreg(adev, reg, v);
540 up_read(&adev->reset_domain->sem);
542 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
545 adev->pcie_wreg(adev, reg * 4, v);
548 trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
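/*
 * Illustrative sketch (assumption): most of the driver does not call
 * amdgpu_device_rreg()/amdgpu_device_wreg() directly but goes through
 * macros along these lines (the exact definitions live in the driver
 * headers; shown only to connect the helpers above to their call sites):
 *
 *	#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
 *	#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
 *	#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
 */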
552 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
554 * @adev: amdgpu_device pointer
555 * @reg: mmio/rlc register
558 * This function is invoked only for debugfs register access.
560 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
561 uint32_t reg, uint32_t v)
563 if (amdgpu_device_skip_hw_access(adev))
566 if (amdgpu_sriov_fullaccess(adev) &&
567 adev->gfx.rlc.funcs &&
568 adev->gfx.rlc.funcs->is_rlcg_access_range) {
569 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
570 return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
571 } else if ((reg * 4) >= adev->rmmio_size) {
572 adev->pcie_wreg(adev, reg * 4, v);
574 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
579 * amdgpu_mm_rdoorbell - read a doorbell dword
581 * @adev: amdgpu_device pointer
582 * @index: doorbell index
584 * Returns the value in the doorbell aperture at the
585 * requested doorbell index (CIK).
587 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
589 if (amdgpu_device_skip_hw_access(adev))
592 if (index < adev->doorbell.num_doorbells) {
593 return readl(adev->doorbell.ptr + index);
595 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
601 * amdgpu_mm_wdoorbell - write a doorbell dword
603 * @adev: amdgpu_device pointer
604 * @index: doorbell index
607 * Writes @v to the doorbell aperture at the
608 * requested doorbell index (CIK).
610 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
612 if (amdgpu_device_skip_hw_access(adev))
615 if (index < adev->doorbell.num_doorbells) {
616 writel(v, adev->doorbell.ptr + index);
618 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
623 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
625 * @adev: amdgpu_device pointer
626 * @index: doorbell index
628 * Returns the value in the doorbell aperture at the
629 * requested doorbell index (VEGA10+).
631 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
633 if (amdgpu_device_skip_hw_access(adev))
636 if (index < adev->doorbell.num_doorbells) {
637 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
639 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
645 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
647 * @adev: amdgpu_device pointer
648 * @index: doorbell index
651 * Writes @v to the doorbell aperture at the
652 * requested doorbell index (VEGA10+).
654 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
656 if (amdgpu_device_skip_hw_access(adev))
659 if (index < adev->doorbell.num_doorbells) {
660 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
662 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
667 * amdgpu_device_indirect_rreg - read an indirect register
669 * @adev: amdgpu_device pointer
670 * @pcie_index: mmio register offset
671 * @pcie_data: mmio register offset
672 * @reg_addr: indirect register address to read from
674 * Returns the value of indirect register @reg_addr
676 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
677 u32 pcie_index, u32 pcie_data,
682 void __iomem *pcie_index_offset;
683 void __iomem *pcie_data_offset;
685 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
686 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
687 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
689 writel(reg_addr, pcie_index_offset);
690 readl(pcie_index_offset);
691 r = readl(pcie_data_offset);
692 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
698 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
700 * @adev: amdgpu_device pointer
701 * @pcie_index: mmio register offset
702 * @pcie_data: mmio register offset
703 * @reg_addr: indirect register address to read from
705 * Returns the value of indirect register @reg_addr
707 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
708 u32 pcie_index, u32 pcie_data,
713 void __iomem *pcie_index_offset;
714 void __iomem *pcie_data_offset;
716 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
717 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
718 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
720 /* read low 32 bits */
721 writel(reg_addr, pcie_index_offset);
722 readl(pcie_index_offset);
723 r = readl(pcie_data_offset);
724 /* read high 32 bits */
725 writel(reg_addr + 4, pcie_index_offset);
726 readl(pcie_index_offset);
727 r |= ((u64)readl(pcie_data_offset) << 32);
728 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
734 * amdgpu_device_indirect_wreg - write an indirect register
736 * @adev: amdgpu_device pointer
737 * @pcie_index: mmio register offset
738 * @pcie_data: mmio register offset
739 * @reg_addr: indirect register offset
740 * @reg_data: indirect register data
743 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
744 u32 pcie_index, u32 pcie_data,
745 u32 reg_addr, u32 reg_data)
748 void __iomem *pcie_index_offset;
749 void __iomem *pcie_data_offset;
751 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
752 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
753 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
755 writel(reg_addr, pcie_index_offset);
756 readl(pcie_index_offset);
757 writel(reg_data, pcie_data_offset);
758 readl(pcie_data_offset);
759 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
763 * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
765 * @adev: amdgpu_device pointer
766 * @pcie_index: mmio register offset
767 * @pcie_data: mmio register offset
768 * @reg_addr: indirect register offset
769 * @reg_data: indirect register data
772 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
773 u32 pcie_index, u32 pcie_data,
774 u32 reg_addr, u64 reg_data)
777 void __iomem *pcie_index_offset;
778 void __iomem *pcie_data_offset;
780 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
781 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
782 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
784 /* write low 32 bits */
785 writel(reg_addr, pcie_index_offset);
786 readl(pcie_index_offset);
787 writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
788 readl(pcie_data_offset);
789 /* write high 32 bits */
790 writel(reg_addr + 4, pcie_index_offset);
791 readl(pcie_index_offset);
792 writel((u32)(reg_data >> 32), pcie_data_offset);
793 readl(pcie_data_offset);
794 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
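/*
 * Illustrative sketch (assumption): ASIC code typically wires the indirect
 * helpers above into adev->pcie_rreg/pcie_wreg via thin wrappers that
 * supply the NBIO-specific index/data register offsets, conceptually:
 *
 *	static u32 soc_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *	{
 *		u32 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 *		u32 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 *
 *		return amdgpu_device_indirect_rreg(adev, pcie_index,
 *						   pcie_data, reg);
 *	}
 *
 * soc_pcie_rreg is a hypothetical name; the nbio callbacks exist in the
 * driver.
 */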
798 * amdgpu_invalid_rreg - dummy reg read function
800 * @adev: amdgpu_device pointer
801 * @reg: offset of register
803 * Dummy register read function. Used for register blocks
804 * that certain asics don't have (all asics).
805 * Returns the value in the register.
807 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
809 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
815 * amdgpu_invalid_wreg - dummy reg write function
817 * @adev: amdgpu_device pointer
818 * @reg: offset of register
819 * @v: value to write to the register
821 * Dummy register write function. Used for register blocks
822 * that certain asics don't have (all asics).
824 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
826 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
832 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
834 * @adev: amdgpu_device pointer
835 * @reg: offset of register
837 * Dummy register read function. Used for register blocks
838 * that certain asics don't have (all asics).
839 * Returns the value in the register.
841 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
843 DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
849 * amdgpu_invalid_wreg64 - dummy reg write function
851 * @adev: amdgpu_device pointer
852 * @reg: offset of register
853 * @v: value to write to the register
855 * Dummy register write function. Used for register blocks
856 * that certain asics don't have (all asics).
858 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
860 DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
866 * amdgpu_block_invalid_rreg - dummy reg read function
868 * @adev: amdgpu_device pointer
869 * @block: offset of instance
870 * @reg: offset of register
872 * Dummy register read function. Used for register blocks
873 * that certain asics don't have (all asics).
874 * Returns the value in the register.
876 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
877 uint32_t block, uint32_t reg)
879 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
886 * amdgpu_block_invalid_wreg - dummy reg write function
888 * @adev: amdgpu_device pointer
889 * @block: offset of instance
890 * @reg: offset of register
891 * @v: value to write to the register
893 * Dummy register write function. Used for register blocks
894 * that certain asics don't have (all asics).
896 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
898 uint32_t reg, uint32_t v)
900 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
906 * amdgpu_device_asic_init - Wrapper for atom asic_init
908 * @adev: amdgpu_device pointer
910 * Does any asic specific work and then calls atom asic init.
912 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
914 amdgpu_asic_pre_asic_init(adev);
916 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
917 return amdgpu_atomfirmware_asic_init(adev, true);
919 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
923 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
925 * @adev: amdgpu_device pointer
927 * Allocates a scratch page of VRAM for use by various things in the driver.
930 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
932 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
933 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
934 &adev->vram_scratch.robj,
935 &adev->vram_scratch.gpu_addr,
936 (void **)&adev->vram_scratch.ptr);
940 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
942 * @adev: amdgpu_device pointer
944 * Frees the VRAM scratch page.
946 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
948 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
952 * amdgpu_device_program_register_sequence - program an array of registers.
954 * @adev: amdgpu_device pointer
955 * @registers: pointer to the register array
956 * @array_size: size of the register array
958 * Programs an array of registers with AND and OR masks.
959 * This is a helper for setting golden registers.
961 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
962 const u32 *registers,
963 const u32 array_size)
965 u32 tmp, reg, and_mask, or_mask;
971 for (i = 0; i < array_size; i += 3) {
972 reg = registers[i + 0];
973 and_mask = registers[i + 1];
974 or_mask = registers[i + 2];
976 if (and_mask == 0xffffffff) {
981 if (adev->family >= AMDGPU_FAMILY_AI)
982 tmp |= (or_mask & and_mask);
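/*
 * Illustrative sketch (assumption): golden register arrays consumed by the
 * helper above are flat {offset, AND mask, OR mask} triples; an AND mask of
 * 0xffffffff means the OR mask is written directly:
 *
 *	static const u32 golden_settings_example[] = {
 *		mmFOO_CTRL, 0xffffffff, 0x00000100,    full overwrite
 *		mmBAR_CNTL, 0xfffffff0, 0x00000002,    read-modify-write
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *					ARRAY_SIZE(golden_settings_example));
 *
 * mmFOO_CTRL and mmBAR_CNTL are placeholder register names.
 */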
991 * amdgpu_device_pci_config_reset - reset the GPU
993 * @adev: amdgpu_device pointer
995 * Resets the GPU using the pci config reset sequence.
996 * Only applicable to asics prior to vega10.
998 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1000 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1004 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1006 * @adev: amdgpu_device pointer
1008 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1010 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1012 return pci_reset_function(adev->pdev);
1016 * GPU doorbell aperture helpers function.
1019 * amdgpu_device_doorbell_init - Init doorbell driver information.
1021 * @adev: amdgpu_device pointer
1023 * Init doorbell driver information (CIK)
1024 * Returns 0 on success, error on failure.
1026 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1029 /* No doorbell on SI hardware generation */
1030 if (adev->asic_type < CHIP_BONAIRE) {
1031 adev->doorbell.base = 0;
1032 adev->doorbell.size = 0;
1033 adev->doorbell.num_doorbells = 0;
1034 adev->doorbell.ptr = NULL;
1038 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1041 amdgpu_asic_init_doorbell_index(adev);
1043 /* doorbell bar mapping */
1044 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1045 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1047 if (adev->enable_mes) {
1048 adev->doorbell.num_doorbells =
1049 adev->doorbell.size / sizeof(u32);
1051 adev->doorbell.num_doorbells =
1052 min_t(u32, adev->doorbell.size / sizeof(u32),
1053 adev->doorbell_index.max_assignment+1);
1054 if (adev->doorbell.num_doorbells == 0)
1057 /* For Vega, reserve and map two pages on the doorbell BAR since the SDMA
1058 * paging queue doorbell uses the second page. The
1059 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1060 * doorbells are in the first page. So with the paging queue enabled,
1061 * the max num_doorbells should be extended by one page (0x400 in dwords).
1063 if (adev->asic_type >= CHIP_VEGA10)
1064 adev->doorbell.num_doorbells += 0x400;
1067 adev->doorbell.ptr = ioremap(adev->doorbell.base,
1068 adev->doorbell.num_doorbells *
1070 if (adev->doorbell.ptr == NULL)
1077 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1079 * @adev: amdgpu_device pointer
1081 * Tear down doorbell driver information (CIK)
1083 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1085 iounmap(adev->doorbell.ptr);
1086 adev->doorbell.ptr = NULL;
1092 * amdgpu_device_wb_*()
1093 * Writeback is the method by which the GPU updates special pages in memory
1094 * with the status of certain GPU events (fences, ring pointers,etc.).
1098 * amdgpu_device_wb_fini - Disable Writeback and free memory
1100 * @adev: amdgpu_device pointer
1102 * Disables Writeback and frees the Writeback memory (all asics).
1103 * Used at driver shutdown.
1105 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1107 if (adev->wb.wb_obj) {
1108 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1110 (void **)&adev->wb.wb);
1111 adev->wb.wb_obj = NULL;
1116 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1118 * @adev: amdgpu_device pointer
1120 * Initializes writeback and allocates writeback memory (all asics).
1121 * Used at driver startup.
1122 * Returns 0 on success or an -error on failure.
1124 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1128 if (adev->wb.wb_obj == NULL) {
1129 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1130 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1131 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1132 &adev->wb.wb_obj, &adev->wb.gpu_addr,
1133 (void **)&adev->wb.wb);
1135 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1139 adev->wb.num_wb = AMDGPU_MAX_WB;
1140 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1142 /* clear wb memory */
1143 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1150 * amdgpu_device_wb_get - Allocate a wb entry
1152 * @adev: amdgpu_device pointer
1155 * Allocate a wb slot for use by the driver (all asics).
1156 * Returns 0 on success or -EINVAL on failure.
1158 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1160 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1162 if (offset < adev->wb.num_wb) {
1163 __set_bit(offset, adev->wb.used);
1164 *wb = offset << 3; /* convert to dw offset */
1172 * amdgpu_device_wb_free - Free a wb entry
1174 * @adev: amdgpu_device pointer
1177 * Free a wb slot allocated for use by the driver (all asics)
1179 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1182 if (wb < adev->wb.num_wb)
1183 __clear_bit(wb, adev->wb.used);
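/*
 * Illustrative usage sketch (assumption): a ring or fence user grabs a
 * writeback slot, derives its CPU and GPU addresses, and frees it again:
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		volatile u32 *cpu_ptr = &adev->wb.wb[wb];
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *
 *		... hand gpu_addr to the engine, poll *cpu_ptr ...
 *
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */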
1187 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1189 * @adev: amdgpu_device pointer
1191 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1192 * to fail, but if any of the BARs is not accessible after the resize we abort
1193 * driver loading by returning -ENODEV.
1195 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1197 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1198 struct pci_bus *root;
1199 struct resource *res;
1205 if (amdgpu_sriov_vf(adev))
1208 /* skip if the bios has already enabled large BAR */
1209 if (adev->gmc.real_vram_size &&
1210 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1213 /* Check if the root BUS has 64bit memory resources */
1214 root = adev->pdev->bus;
1215 while (root->parent)
1216 root = root->parent;
1218 pci_bus_for_each_resource(root, res, i) {
1219 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1220 res->start > 0x100000000ull)
1224 /* Trying to resize is pointless without a root hub window above 4GB */
1228 /* Limit the BAR size to what is available */
1229 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1232 /* Disable memory decoding while we change the BAR addresses and size */
1233 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1234 pci_write_config_word(adev->pdev, PCI_COMMAND,
1235 cmd & ~PCI_COMMAND_MEMORY);
1237 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1238 amdgpu_device_doorbell_fini(adev);
1239 if (adev->asic_type >= CHIP_BONAIRE)
1240 pci_release_resource(adev->pdev, 2);
1242 pci_release_resource(adev->pdev, 0);
1244 r = pci_resize_resource(adev->pdev, 0, rbar_size);
1246 DRM_INFO("Not enough PCI address space for a large BAR.");
1247 else if (r && r != -ENOTSUPP)
1248 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1250 pci_assign_unassigned_bus_resources(adev->pdev->bus);
1252 /* When the doorbell or fb BAR isn't available we have no chance of using the device, so abort. */
1255 r = amdgpu_device_doorbell_init(adev);
1256 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1259 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1265 * GPU helpers function.
1268 * amdgpu_device_need_post - check if the hw needs post or not
1270 * @adev: amdgpu_device pointer
1272 * Check if the asic has been initialized (all asics) at driver startup
1273 * or if post is needed after a hw reset is performed.
1274 * Returns true if post is needed, false if not.
1276 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1280 if (amdgpu_sriov_vf(adev))
1283 if (amdgpu_passthrough(adev)) {
1284 /* for FIJI: In the whole GPU pass-through virtualization case, after a VM reboot
1285 * some old SMC firmware still needs the driver to do vPost, otherwise the GPU hangs;
1286 * SMC firmware versions above 22.15 don't have this flaw, so we force
1287 * vPost to be executed for SMC versions below 22.15
1289 if (adev->asic_type == CHIP_FIJI) {
1292 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1293 /* force vPost if an error occurred */
1297 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1298 if (fw_ver < 0x00160e00)
1303 /* Don't post if we need to reset whole hive on init */
1304 if (adev->gmc.xgmi.pending_reset)
1307 if (adev->has_hw_reset) {
1308 adev->has_hw_reset = false;
1312 /* bios scratch used on CIK+ */
1313 if (adev->asic_type >= CHIP_BONAIRE)
1314 return amdgpu_atombios_scratch_need_asic_init(adev);
1316 /* check MEM_SIZE for older asics */
1317 reg = amdgpu_asic_get_config_memsize(adev);
1319 if ((reg != 0) && (reg != 0xffffffff))
1326 * amdgpu_device_should_use_aspm - check if the device should program ASPM
1328 * @adev: amdgpu_device pointer
1330 * Confirm whether the module parameter and pcie bridge agree that ASPM should
1331 * be set for this device.
1333 * Returns true if it should be used or false if not.
1335 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1337 switch (amdgpu_aspm) {
1347 return pcie_aspm_enabled(adev->pdev);
1350 /* if we get transitioned to only one device, take VGA back */
1352 * amdgpu_device_vga_set_decode - enable/disable vga decode
1354 * @pdev: PCI device pointer
1355 * @state: enable/disable vga decode
1357 * Enable/disable vga decode (all asics).
1358 * Returns VGA resource flags.
1360 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1363 struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1364 amdgpu_asic_set_vga_state(adev, state);
1366 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1367 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1369 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1373 * amdgpu_device_check_block_size - validate the vm block size
1375 * @adev: amdgpu_device pointer
1377 * Validates the vm block size specified via module parameter.
1378 * The vm block size defines the number of bits in the page table versus the page
1379 * directory; a page is 4KB, so we have a 12 bit offset, a minimum of 9 bits in the
1380 * page table, and the remaining bits in the page directory.
1382 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1384 /* defines number of bits in page table versus page directory,
1385 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1386 * page table and the remaining bits are in the page directory */
1387 if (amdgpu_vm_block_size == -1)
1390 if (amdgpu_vm_block_size < 9) {
1391 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1392 amdgpu_vm_block_size);
1393 amdgpu_vm_block_size = -1;
1398 * amdgpu_device_check_vm_size - validate the vm size
1400 * @adev: amdgpu_device pointer
1402 * Validates the vm size in GB specified via module parameter.
1403 * The VM size is the size of the GPU virtual memory space in GB.
1405 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1407 /* no need to check the default value */
1408 if (amdgpu_vm_size == -1)
1411 if (amdgpu_vm_size < 1) {
1412 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1414 amdgpu_vm_size = -1;
1418 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1421 bool is_os_64 = (sizeof(void *) == 8);
1422 uint64_t total_memory;
1423 uint64_t dram_size_seven_GB = 0x1B8000000;
1424 uint64_t dram_size_three_GB = 0xB8000000;
1426 if (amdgpu_smu_memory_pool_size == 0)
1430 DRM_WARN("Not 64-bit OS, feature not supported\n");
1434 total_memory = (uint64_t)si.totalram * si.mem_unit;
1436 if ((amdgpu_smu_memory_pool_size == 1) ||
1437 (amdgpu_smu_memory_pool_size == 2)) {
1438 if (total_memory < dram_size_three_GB)
1440 } else if ((amdgpu_smu_memory_pool_size == 4) ||
1441 (amdgpu_smu_memory_pool_size == 8)) {
1442 if (total_memory < dram_size_seven_GB)
1445 DRM_WARN("Smu memory pool size not supported\n");
1448 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1453 DRM_WARN("No enough system memory\n");
1455 adev->pm.smu_prv_buffer_size = 0;
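/*
 * Worked example (illustrative): amdgpu_smu_memory_pool_size is given in
 * units of 256 MiB, so "<< 28" converts it to bytes:
 *
 *	pool_size = 1  ->  1ULL << 28 = 268435456  bytes (256 MiB)
 *	pool_size = 2  ->  2ULL << 28 = 536870912  bytes (512 MiB)
 *	pool_size = 8  ->  8ULL << 28 = 2147483648 bytes (2 GiB)
 */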
1458 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1460 if (!(adev->flags & AMD_IS_APU) ||
1461 adev->asic_type < CHIP_RAVEN)
1464 switch (adev->asic_type) {
1466 if (adev->pdev->device == 0x15dd)
1467 adev->apu_flags |= AMD_APU_IS_RAVEN;
1468 if (adev->pdev->device == 0x15d8)
1469 adev->apu_flags |= AMD_APU_IS_PICASSO;
1472 if ((adev->pdev->device == 0x1636) ||
1473 (adev->pdev->device == 0x164c))
1474 adev->apu_flags |= AMD_APU_IS_RENOIR;
1476 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1479 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1481 case CHIP_YELLOW_CARP:
1483 case CHIP_CYAN_SKILLFISH:
1484 if ((adev->pdev->device == 0x13FE) ||
1485 (adev->pdev->device == 0x143F))
1486 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1496 * amdgpu_device_check_arguments - validate module params
1498 * @adev: amdgpu_device pointer
1500 * Validates certain module parameters and updates
1501 * the associated values used by the driver (all asics).
1503 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1505 if (amdgpu_sched_jobs < 4) {
1506 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1508 amdgpu_sched_jobs = 4;
1509 } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1510 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1512 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1515 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1516 /* gart size must be greater or equal to 32M */
1517 dev_warn(adev->dev, "gart size (%d) too small\n",
1519 amdgpu_gart_size = -1;
1522 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1523 /* gtt size must be greater or equal to 32M */
1524 dev_warn(adev->dev, "gtt size (%d) too small\n",
1526 amdgpu_gtt_size = -1;
1529 /* valid range is between 4 and 9 inclusive */
1530 if (amdgpu_vm_fragment_size != -1 &&
1531 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1532 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1533 amdgpu_vm_fragment_size = -1;
1536 if (amdgpu_sched_hw_submission < 2) {
1537 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1538 amdgpu_sched_hw_submission);
1539 amdgpu_sched_hw_submission = 2;
1540 } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1541 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1542 amdgpu_sched_hw_submission);
1543 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1546 if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1547 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1548 amdgpu_reset_method = -1;
1551 amdgpu_device_check_smu_prv_buffer_size(adev);
1553 amdgpu_device_check_vm_size(adev);
1555 amdgpu_device_check_block_size(adev);
1557 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1563 * amdgpu_switcheroo_set_state - set switcheroo state
1565 * @pdev: pci dev pointer
1566 * @state: vga_switcheroo state
1568 * Callback for the switcheroo driver. Suspends or resumes the
1569 * asics before or after they are powered up using ACPI methods.
1571 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1572 enum vga_switcheroo_state state)
1574 struct drm_device *dev = pci_get_drvdata(pdev);
1577 if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1580 if (state == VGA_SWITCHEROO_ON) {
1581 pr_info("switched on\n");
1582 /* don't suspend or resume card normally */
1583 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1585 pci_set_power_state(pdev, PCI_D0);
1586 amdgpu_device_load_pci_state(pdev);
1587 r = pci_enable_device(pdev);
1589 DRM_WARN("pci_enable_device failed (%d)\n", r);
1590 amdgpu_device_resume(dev, true);
1592 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1594 pr_info("switched off\n");
1595 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1596 amdgpu_device_suspend(dev, true);
1597 amdgpu_device_cache_pci_state(pdev);
1598 /* Shut down the device */
1599 pci_disable_device(pdev);
1600 pci_set_power_state(pdev, PCI_D3cold);
1601 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1606 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1608 * @pdev: pci dev pointer
1610 * Callback for the switcheroo driver. Checks if the switcheroo
1611 * state can be changed.
1612 * Returns true if the state can be changed, false if not.
1614 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1616 struct drm_device *dev = pci_get_drvdata(pdev);
1619 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1620 * locking inversion with the driver load path. And the access here is
1621 * completely racy anyway. So don't bother with locking for now.
1623 return atomic_read(&dev->open_count) == 0;
1626 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1627 .set_gpu_state = amdgpu_switcheroo_set_state,
1629 .can_switch = amdgpu_switcheroo_can_switch,
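/*
 * Illustrative sketch (assumption): this ops table is registered with the
 * vga_switcheroo subsystem during device init, roughly:
 *
 *	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops,
 *				       amdgpu_device_supports_px(dev));
 *
 * and removed again with vga_switcheroo_unregister_client() on teardown.
 */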
1633 * amdgpu_device_ip_set_clockgating_state - set the CG state
1635 * @dev: amdgpu_device pointer
1636 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1637 * @state: clockgating state (gate or ungate)
1639 * Sets the requested clockgating state for all instances of
1640 * the hardware IP specified.
1641 * Returns the error code from the last instance.
1643 int amdgpu_device_ip_set_clockgating_state(void *dev,
1644 enum amd_ip_block_type block_type,
1645 enum amd_clockgating_state state)
1647 struct amdgpu_device *adev = dev;
1650 for (i = 0; i < adev->num_ip_blocks; i++) {
1651 if (!adev->ip_blocks[i].status.valid)
1653 if (adev->ip_blocks[i].version->type != block_type)
1655 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1657 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1658 (void *)adev, state);
1660 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1661 adev->ip_blocks[i].version->funcs->name, r);
1667 * amdgpu_device_ip_set_powergating_state - set the PG state
1669 * @dev: amdgpu_device pointer
1670 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1671 * @state: powergating state (gate or ungate)
1673 * Sets the requested powergating state for all instances of
1674 * the hardware IP specified.
1675 * Returns the error code from the last instance.
1677 int amdgpu_device_ip_set_powergating_state(void *dev,
1678 enum amd_ip_block_type block_type,
1679 enum amd_powergating_state state)
1681 struct amdgpu_device *adev = dev;
1684 for (i = 0; i < adev->num_ip_blocks; i++) {
1685 if (!adev->ip_blocks[i].status.valid)
1687 if (adev->ip_blocks[i].version->type != block_type)
1689 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1691 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1692 (void *)adev, state);
1694 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1695 adev->ip_blocks[i].version->funcs->name, r);
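/*
 * Illustrative usage sketch (assumption): callers pass an IP block type and
 * the desired gating state, e.g. to gate GFX clocks and power:
 *
 *	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_CG_STATE_GATE);
 *	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_PG_STATE_GATE);
 */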
1701 * amdgpu_device_ip_get_clockgating_state - get the CG state
1703 * @adev: amdgpu_device pointer
1704 * @flags: clockgating feature flags
1706 * Walks the list of IPs on the device and updates the clockgating
1707 * flags for each IP.
1708 * Updates @flags with the feature flags for each hardware IP where
1709 * clockgating is enabled.
1711 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1716 for (i = 0; i < adev->num_ip_blocks; i++) {
1717 if (!adev->ip_blocks[i].status.valid)
1719 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1720 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1725 * amdgpu_device_ip_wait_for_idle - wait for idle
1727 * @adev: amdgpu_device pointer
1728 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1730 * Waits for the requested hardware IP to be idle.
1731 * Returns 0 for success or a negative error code on failure.
1733 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1734 enum amd_ip_block_type block_type)
1738 for (i = 0; i < adev->num_ip_blocks; i++) {
1739 if (!adev->ip_blocks[i].status.valid)
1741 if (adev->ip_blocks[i].version->type == block_type) {
1742 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1753 * amdgpu_device_ip_is_idle - is the hardware IP idle
1755 * @adev: amdgpu_device pointer
1756 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1758 * Check if the hardware IP is idle or not.
1759 * Returns true if the IP is idle, false if not.
1761 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1762 enum amd_ip_block_type block_type)
1766 for (i = 0; i < adev->num_ip_blocks; i++) {
1767 if (!adev->ip_blocks[i].status.valid)
1769 if (adev->ip_blocks[i].version->type == block_type)
1770 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1777 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1779 * @adev: amdgpu_device pointer
1780 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1782 * Returns a pointer to the hardware IP block structure
1783 * if it exists for the asic, otherwise NULL.
1785 struct amdgpu_ip_block *
1786 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1787 enum amd_ip_block_type type)
1791 for (i = 0; i < adev->num_ip_blocks; i++)
1792 if (adev->ip_blocks[i].version->type == type)
1793 return &adev->ip_blocks[i];
1799 * amdgpu_device_ip_block_version_cmp
1801 * @adev: amdgpu_device pointer
1802 * @type: enum amd_ip_block_type
1803 * @major: major version
1804 * @minor: minor version
1806 * Returns 0 if the IP block version is equal to or greater than the one specified,
1807 * 1 if it is smaller or the ip_block doesn't exist.
1809 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1810 enum amd_ip_block_type type,
1811 u32 major, u32 minor)
1813 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1815 if (ip_block && ((ip_block->version->major > major) ||
1816 ((ip_block->version->major == major) &&
1817 (ip_block->version->minor >= minor))))
1824 * amdgpu_device_ip_block_add
1826 * @adev: amdgpu_device pointer
1827 * @ip_block_version: pointer to the IP to add
1829 * Adds the IP block driver information to the collection of IPs
1832 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1833 const struct amdgpu_ip_block_version *ip_block_version)
1835 if (!ip_block_version)
1838 switch (ip_block_version->type) {
1839 case AMD_IP_BLOCK_TYPE_VCN:
1840 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1843 case AMD_IP_BLOCK_TYPE_JPEG:
1844 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1851 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1852 ip_block_version->funcs->name);
1854 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
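/*
 * Illustrative usage sketch: ASIC setup code adds its IP blocks in order
 * with this helper; the block names below are examples from the driver:
 *
 *	amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 *	amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
 *	amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
 *	...
 */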
1860 * amdgpu_device_enable_virtual_display - enable virtual display feature
1862 * @adev: amdgpu_device pointer
1864 * Enables the virtual display feature if the user has enabled it via
1865 * the module parameter virtual_display. This feature provides a virtual
1866 * display hardware on headless boards or in virtualized environments.
1867 * This function parses and validates the configuration string specified by
1868 * the user and configures the virtual display configuration (number of
1869 * virtual connectors, crtcs, etc.) specified.
1871 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1873 adev->enable_virtual_display = false;
1875 if (amdgpu_virtual_display) {
1876 const char *pci_address_name = pci_name(adev->pdev);
1877 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1879 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1880 pciaddstr_tmp = pciaddstr;
1881 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1882 pciaddname = strsep(&pciaddname_tmp, ",");
1883 if (!strcmp("all", pciaddname)
1884 || !strcmp(pci_address_name, pciaddname)) {
1888 adev->enable_virtual_display = true;
1891 res = kstrtol(pciaddname_tmp, 10,
1899 adev->mode_info.num_crtc = num_crtc;
1901 adev->mode_info.num_crtc = 1;
1907 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1908 amdgpu_virtual_display, pci_address_name,
1909 adev->enable_virtual_display, adev->mode_info.num_crtc);
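/*
 * Illustrative example (assumption): the virtual_display module parameter
 * parsed above is a semicolon-separated list of PCI addresses, each with an
 * optional crtc count, e.g.:
 *
 *	amdgpu.virtual_display=0000:01:00.0,2
 *
 * enables two virtual crtcs on the device at 0000:01:00.0, while
 * virtual_display="all" enables the feature on every amdgpu device.
 */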
1916 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1918 * @adev: amdgpu_device pointer
1920 * Parses the asic configuration parameters specified in the gpu info
1921 * firmware and makes them available to the driver for use in configuring
1923 * Returns 0 on success, -EINVAL on failure.
1925 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1927 const char *chip_name;
1930 const struct gpu_info_firmware_header_v1_0 *hdr;
1932 adev->firmware.gpu_info_fw = NULL;
1934 if (adev->mman.discovery_bin) {
1936 * FIXME: The bounding box is still needed by Navi12, so
1937 * temporarily read it from gpu_info firmware. Should be dropped
1938 * when DAL no longer needs it.
1940 if (adev->asic_type != CHIP_NAVI12)
1944 switch (adev->asic_type) {
1948 chip_name = "vega10";
1951 chip_name = "vega12";
1954 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1955 chip_name = "raven2";
1956 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1957 chip_name = "picasso";
1959 chip_name = "raven";
1962 chip_name = "arcturus";
1965 chip_name = "navi12";
1969 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1970 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1973 "Failed to load gpu_info firmware \"%s\"\n",
1977 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1980 "Failed to validate gpu_info firmware \"%s\"\n",
1985 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1986 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1988 switch (hdr->version_major) {
1991 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1992 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1993 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1996 * Should be dropped when DAL no longer needs it.
1998 if (adev->asic_type == CHIP_NAVI12)
1999 goto parse_soc_bounding_box;
2001 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2002 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2003 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2004 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2005 adev->gfx.config.max_texture_channel_caches =
2006 le32_to_cpu(gpu_info_fw->gc_num_tccs);
2007 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2008 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2009 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2010 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2011 adev->gfx.config.double_offchip_lds_buf =
2012 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2013 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2014 adev->gfx.cu_info.max_waves_per_simd =
2015 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2016 adev->gfx.cu_info.max_scratch_slots_per_cu =
2017 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2018 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2019 if (hdr->version_minor >= 1) {
2020 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2021 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2022 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2023 adev->gfx.config.num_sc_per_sh =
2024 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2025 adev->gfx.config.num_packer_per_sc =
2026 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2029 parse_soc_bounding_box:
2031 * soc bounding box info is not integrated into the discovery table;
2032 * we always need to parse it from the gpu info firmware if needed.
2034 if (hdr->version_minor == 2) {
2035 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2036 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2037 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2038 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2044 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2053 * amdgpu_device_ip_early_init - run early init for hardware IPs
2055 * @adev: amdgpu_device pointer
2057 * Early initialization pass for hardware IPs. The hardware IPs that make
2058 * up each asic are discovered and each IP's early_init callback is run. This
2059 * is the first stage in initializing the asic.
2060 * Returns 0 on success, negative error code on failure.
2062 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2064 struct drm_device *dev = adev_to_drm(adev);
2065 struct pci_dev *parent;
2068 amdgpu_device_enable_virtual_display(adev);
2070 if (amdgpu_sriov_vf(adev)) {
2071 r = amdgpu_virt_request_full_gpu(adev, true);
2076 switch (adev->asic_type) {
2077 #ifdef CONFIG_DRM_AMDGPU_SI
2083 adev->family = AMDGPU_FAMILY_SI;
2084 r = si_set_ip_blocks(adev);
2089 #ifdef CONFIG_DRM_AMDGPU_CIK
2095 if (adev->flags & AMD_IS_APU)
2096 adev->family = AMDGPU_FAMILY_KV;
2098 adev->family = AMDGPU_FAMILY_CI;
2100 r = cik_set_ip_blocks(adev);
2108 case CHIP_POLARIS10:
2109 case CHIP_POLARIS11:
2110 case CHIP_POLARIS12:
2114 if (adev->flags & AMD_IS_APU)
2115 adev->family = AMDGPU_FAMILY_CZ;
2117 adev->family = AMDGPU_FAMILY_VI;
2119 r = vi_set_ip_blocks(adev);
2124 r = amdgpu_discovery_set_ip_blocks(adev);
2130 if (amdgpu_has_atpx() &&
2131 (amdgpu_is_atpx_hybrid() ||
2132 amdgpu_has_atpx_dgpu_power_cntl()) &&
2133 ((adev->flags & AMD_IS_APU) == 0) &&
2134 !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2135 adev->flags |= AMD_IS_PX;
2137 if (!(adev->flags & AMD_IS_APU)) {
2138 parent = pci_upstream_bridge(adev->pdev);
2139 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2142 amdgpu_amdkfd_device_probe(adev);
2144 adev->pm.pp_feature = amdgpu_pp_feature_mask;
2145 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2146 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2147 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2148 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2150 for (i = 0; i < adev->num_ip_blocks; i++) {
2151 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2152 DRM_ERROR("disabled ip block: %d <%s>\n",
2153 i, adev->ip_blocks[i].version->funcs->name);
2154 adev->ip_blocks[i].status.valid = false;
2156 if (adev->ip_blocks[i].version->funcs->early_init) {
2157 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2159 adev->ip_blocks[i].status.valid = false;
2161 DRM_ERROR("early_init of IP block <%s> failed %d\n",
2162 adev->ip_blocks[i].version->funcs->name, r);
2165 adev->ip_blocks[i].status.valid = true;
2168 adev->ip_blocks[i].status.valid = true;
2171 /* get the vbios after the asic_funcs are set up */
2172 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2173 r = amdgpu_device_parse_gpu_info_fw(adev);
2178 if (!amdgpu_get_bios(adev))
2181 r = amdgpu_atombios_init(adev);
2183 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2184 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2188 /* get pf2vf msg info at its earliest time */
2189 if (amdgpu_sriov_vf(adev))
2190 amdgpu_virt_init_data_exchange(adev);
2195 adev->cg_flags &= amdgpu_cg_mask;
2196 adev->pg_flags &= amdgpu_pg_mask;
2201 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2205 for (i = 0; i < adev->num_ip_blocks; i++) {
2206 if (!adev->ip_blocks[i].status.sw)
2208 if (adev->ip_blocks[i].status.hw)
2210 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2211 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2212 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2213 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2215 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2216 adev->ip_blocks[i].version->funcs->name, r);
2219 adev->ip_blocks[i].status.hw = true;
2226 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2230 for (i = 0; i < adev->num_ip_blocks; i++) {
2231 if (!adev->ip_blocks[i].status.sw)
2233 if (adev->ip_blocks[i].status.hw)
2235 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2237 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2238 adev->ip_blocks[i].version->funcs->name, r);
2241 adev->ip_blocks[i].status.hw = true;
2247 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2251 uint32_t smu_version;
2253 if (adev->asic_type >= CHIP_VEGA10) {
2254 for (i = 0; i < adev->num_ip_blocks; i++) {
2255 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2258 if (!adev->ip_blocks[i].status.sw)
2261 /* no need to do the fw loading again if already done */
2262 if (adev->ip_blocks[i].status.hw)
2265 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2266 r = adev->ip_blocks[i].version->funcs->resume(adev);
2268 DRM_ERROR("resume of IP block <%s> failed %d\n",
2269 adev->ip_blocks[i].version->funcs->name, r);
2273 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2275 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2276 adev->ip_blocks[i].version->funcs->name, r);
2281 adev->ip_blocks[i].status.hw = true;
2286 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2287 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2292 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2297 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2298 struct amdgpu_ring *ring = adev->rings[i];
2300 /* No need to setup the GPU scheduler for rings that don't need it */
2301 if (!ring || ring->no_scheduler)
2304 switch (ring->funcs->type) {
2305 case AMDGPU_RING_TYPE_GFX:
2306 timeout = adev->gfx_timeout;
2308 case AMDGPU_RING_TYPE_COMPUTE:
2309 timeout = adev->compute_timeout;
2311 case AMDGPU_RING_TYPE_SDMA:
2312 timeout = adev->sdma_timeout;
2315 timeout = adev->video_timeout;
2319 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2320 ring->num_hw_submission, amdgpu_job_hang_limit,
2321 timeout, adev->reset_domain->wq,
2322 ring->sched_score, ring->name,
2325 DRM_ERROR("Failed to create scheduler on ring %s.\n",
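/*
 * The per-ring timeouts selected above come from the amdgpu.lockup_timeout
 * module parameter, which is parsed into adev->{gfx,compute,sdma,video}_timeout
 * by amdgpu_device_get_job_timeout_settings() further below.
 */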
2336 * amdgpu_device_ip_init - run init for hardware IPs
2338 * @adev: amdgpu_device pointer
2340 * Main initialization pass for hardware IPs. The list of all the hardware
2341 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2342 * are run. sw_init initializes the software state associated with each IP
2343 * and hw_init initializes the hardware associated with each IP.
2344 * Returns 0 on success, negative error code on failure.
2346 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2350 r = amdgpu_ras_init(adev);
2354 for (i = 0; i < adev->num_ip_blocks; i++) {
2355 if (!adev->ip_blocks[i].status.valid)
2357 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2359 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2360 adev->ip_blocks[i].version->funcs->name, r);
2363 adev->ip_blocks[i].status.sw = true;
2365 /* need to do gmc hw init early so we can allocate gpu mem */
2366 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2367 /* Try to reserve bad pages early */
2368 if (amdgpu_sriov_vf(adev))
2369 amdgpu_virt_exchange_data(adev);
2371 r = amdgpu_device_vram_scratch_init(adev);
2373 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2376 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2378 DRM_ERROR("hw_init %d failed %d\n", i, r);
2381 r = amdgpu_device_wb_init(adev);
2383 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2386 adev->ip_blocks[i].status.hw = true;
2388 /* right after GMC hw init, we create CSA */
2389 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2390 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2391 AMDGPU_GEM_DOMAIN_VRAM,
2394 DRM_ERROR("allocate CSA failed %d\n", r);
2401 if (amdgpu_sriov_vf(adev))
2402 amdgpu_virt_init_data_exchange(adev);
2404 r = amdgpu_ib_pool_init(adev);
2406 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2407 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2411 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init completes */
2415 r = amdgpu_device_ip_hw_init_phase1(adev);
2419 r = amdgpu_device_fw_loading(adev);
2423 r = amdgpu_device_ip_hw_init_phase2(adev);
2428 * retired pages will be loaded from eeprom and reserved here,
2429 * it should be called after amdgpu_device_ip_hw_init_phase2 since
2430 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2431 * functional for I2C communication, which is only true at this point.
2433 * amdgpu_ras_recovery_init may fail, but the upper layers only care
2434 * about failures caused by a bad GPU state, and stop the amdgpu init
2435 * process accordingly. For other failure cases it still releases all
2436 * the resources and prints an error message, rather than returning a
2437 * negative value to the upper level.
2439 * Note: theoretically, this should be called before all vram allocations
2440 * to protect retired pages from being abused.
2442 r = amdgpu_ras_recovery_init(adev);
2447 * In case of XGMI, grab an extra reference for the reset domain of this device
2449 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2450 if (amdgpu_xgmi_add_device(adev) == 0) {
2451 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2453 if (!hive->reset_domain ||
2454 !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2459 /* Drop the early temporary reset domain we created for device */
2460 amdgpu_reset_put_reset_domain(adev->reset_domain);
2461 adev->reset_domain = hive->reset_domain;
2465 r = amdgpu_device_init_schedulers(adev);
2469 /* Don't init kfd if the whole hive needs to be reset during init */
2470 if (!adev->gmc.xgmi.pending_reset)
2471 amdgpu_amdkfd_device_init(adev);
2473 amdgpu_fru_get_product_info(adev);
2476 if (amdgpu_sriov_vf(adev))
2477 amdgpu_virt_release_full_gpu(adev, true);
2483 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2485 * @adev: amdgpu_device pointer
2487 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2488 * this function before a GPU reset. If the value is retained after a
2489 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2491 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2493 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
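/*
 * Illustrative sketch only, not driver code: how the reset-magic helpers
 * are meant to be paired around an ASIC reset. The simplified flow below
 * is an assumption for illustration; the real reset paths carry much more
 * state handling around these calls.
 */
#if 0
static void example_vram_lost_detection(struct amdgpu_device *adev)
{
	amdgpu_device_fill_reset_magic(adev);	/* snapshot magic before reset */
	amdgpu_asic_reset(adev);		/* perform the ASIC reset */
	if (amdgpu_device_check_vram_lost(adev))
		/* VRAM contents are gone; restore page tables from shadows */
		amdgpu_device_recover_vram(adev);
}
#endif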
2497 * amdgpu_device_check_vram_lost - check if vram is valid
2499 * @adev: amdgpu_device pointer
2501 * Checks the reset magic value written to the gart pointer in VRAM.
2502 * The driver calls this after a GPU reset to see if the contents of
2503 * VRAM are lost or not.
2504 * Returns true if VRAM is lost, false if not.
2506 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2508 if (memcmp(adev->gart.ptr, adev->reset_magic,
2509 AMDGPU_RESET_MAGIC_NUM))
2512 if (!amdgpu_in_reset(adev))
2516 * For all ASICs with baco/mode1 reset, the VRAM is
2517 * always assumed to be lost.
2519 switch (amdgpu_asic_reset_method(adev)) {
2520 case AMD_RESET_METHOD_BACO:
2521 case AMD_RESET_METHOD_MODE1:
2529 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2531 * @adev: amdgpu_device pointer
2532 * @state: clockgating state (gate or ungate)
2534 * The list of all the hardware IPs that make up the asic is walked and the
2535 * set_clockgating_state callbacks are run.
2536 * Late initialization pass enabling clockgating for hardware IPs.
2537 * Fini or suspend, pass disabling clockgating for hardware IPs.
2538 * Returns 0 on success, negative error code on failure.
2541 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2542 enum amd_clockgating_state state)
2546 if (amdgpu_emu_mode == 1)
2549 for (j = 0; j < adev->num_ip_blocks; j++) {
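/*
 * Gate in normal (init) order, ungate in reverse order, so clockgating
 * is always torn down in the opposite order it was set up.
 */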
2550 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2551 if (!adev->ip_blocks[i].status.late_initialized)
2553 /* skip CG for GFX on S0ix */
2554 if (adev->in_s0ix &&
2555 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2557 /* skip CG for VCE/UVD, it's handled specially */
2558 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2559 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2560 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2561 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2562 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2563 /* enable clockgating to save power */
2564 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2567 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2568 adev->ip_blocks[i].version->funcs->name, r);
2577 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2578 enum amd_powergating_state state)
2582 if (amdgpu_emu_mode == 1)
2585 for (j = 0; j < adev->num_ip_blocks; j++) {
2586 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2587 if (!adev->ip_blocks[i].status.late_initialized)
2589 /* skip PG for GFX on S0ix */
2590 if (adev->in_s0ix &&
2591 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2593 /* skip PG for VCE/UVD, it's handled specially */
2594 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2595 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2596 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2597 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2598 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2599 /* enable powergating to save power */
2600 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2603 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2604 adev->ip_blocks[i].version->funcs->name, r);
2612 static int amdgpu_device_enable_mgpu_fan_boost(void)
2614 struct amdgpu_gpu_instance *gpu_ins;
2615 struct amdgpu_device *adev;
2618 mutex_lock(&mgpu_info.mutex);
2621 * MGPU fan boost feature should be enabled
2622 * only when there are two or more dGPUs in the system.
2625 if (mgpu_info.num_dgpu < 2)
2628 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2629 gpu_ins = &(mgpu_info.gpu_ins[i]);
2630 adev = gpu_ins->adev;
2631 if (!(adev->flags & AMD_IS_APU) &&
2632 !gpu_ins->mgpu_fan_enabled) {
2633 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2637 gpu_ins->mgpu_fan_enabled = 1;
2642 mutex_unlock(&mgpu_info.mutex);
2648 * amdgpu_device_ip_late_init - run late init for hardware IPs
2650 * @adev: amdgpu_device pointer
2652 * Late initialization pass for hardware IPs. The list of all the hardware
2653 * IPs that make up the asic is walked and the late_init callbacks are run.
2654 * late_init covers any special initialization that an IP requires
2655 * after all of them have been initialized or something that needs to happen
2656 * late in the init process.
2657 * Returns 0 on success, negative error code on failure.
2659 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2661 struct amdgpu_gpu_instance *gpu_instance;
2664 for (i = 0; i < adev->num_ip_blocks; i++) {
2665 if (!adev->ip_blocks[i].status.hw)
2667 if (adev->ip_blocks[i].version->funcs->late_init) {
2668 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2670 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2671 adev->ip_blocks[i].version->funcs->name, r);
2675 adev->ip_blocks[i].status.late_initialized = true;
2678 r = amdgpu_ras_late_init(adev);
2680 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2684 amdgpu_ras_set_error_query_ready(adev, true);
2686 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2687 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2689 amdgpu_device_fill_reset_magic(adev);
2691 r = amdgpu_device_enable_mgpu_fan_boost();
2693 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2695 /* For passthrough configurations on arcturus and aldebaran, enable special SBR handling */
2696 if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2697 adev->asic_type == CHIP_ALDEBARAN))
2698 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2700 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2701 mutex_lock(&mgpu_info.mutex);
2704 * Reset the device p-state to low, as devices boot with it high.
2706 * This should be performed only after all devices from the same
2707 * hive get initialized.
2709 * However, the number of devices in the hive is not known in advance;
2710 * it is counted one by one as the devices initialize.
2712 * So we wait until all XGMI interlinked devices are initialized.
2713 * This may bring some delays as those devices may come from
2714 * different hives. But that should be OK.
2716 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2717 for (i = 0; i < mgpu_info.num_gpu; i++) {
2718 gpu_instance = &(mgpu_info.gpu_ins[i]);
2719 if (gpu_instance->adev->flags & AMD_IS_APU)
2722 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2723 AMDGPU_XGMI_PSTATE_MIN);
2725 DRM_ERROR("pstate setting failed (%d).\n", r);
2731 mutex_unlock(&mgpu_info.mutex);
2738 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2740 * @adev: amdgpu_device pointer
2742 * For ASICs that need to disable the SMC first
2744 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2748 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2751 for (i = 0; i < adev->num_ip_blocks; i++) {
2752 if (!adev->ip_blocks[i].status.hw)
2754 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2755 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2756 /* XXX handle errors */
2758 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2759 adev->ip_blocks[i].version->funcs->name, r);
2761 adev->ip_blocks[i].status.hw = false;
2767 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2771 for (i = 0; i < adev->num_ip_blocks; i++) {
2772 if (!adev->ip_blocks[i].version->funcs->early_fini)
2775 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2777 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2778 adev->ip_blocks[i].version->funcs->name, r);
2782 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2783 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2785 amdgpu_amdkfd_suspend(adev, false);
2787 /* Workaround for ASICs that need to disable the SMC first */
2788 amdgpu_device_smu_fini_early(adev);
2790 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2791 if (!adev->ip_blocks[i].status.hw)
2794 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2795 /* XXX handle errors */
2797 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2798 adev->ip_blocks[i].version->funcs->name, r);
2801 adev->ip_blocks[i].status.hw = false;
2804 if (amdgpu_sriov_vf(adev)) {
2805 if (amdgpu_virt_release_full_gpu(adev, false))
2806 DRM_ERROR("failed to release exclusive mode on fini\n");
2813 * amdgpu_device_ip_fini - run fini for hardware IPs
2815 * @adev: amdgpu_device pointer
2817 * Main teardown pass for hardware IPs. The list of all the hardware
2818 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2819 * are run. hw_fini tears down the hardware associated with each IP
2820 * and sw_fini tears down any software state associated with each IP.
2821 * Returns 0 on success, negative error code on failure.
2823 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2827 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2828 amdgpu_virt_release_ras_err_handler_data(adev);
2830 if (adev->gmc.xgmi.num_physical_nodes > 1)
2831 amdgpu_xgmi_remove_device(adev);
2833 amdgpu_amdkfd_device_fini_sw(adev);
2835 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2836 if (!adev->ip_blocks[i].status.sw)
2839 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2840 amdgpu_ucode_free_bo(adev);
2841 amdgpu_free_static_csa(&adev->virt.csa_obj);
2842 amdgpu_device_wb_fini(adev);
2843 amdgpu_device_vram_scratch_fini(adev);
2844 amdgpu_ib_pool_fini(adev);
2847 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2848 /* XXX handle errors */
2850 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2851 adev->ip_blocks[i].version->funcs->name, r);
2853 adev->ip_blocks[i].status.sw = false;
2854 adev->ip_blocks[i].status.valid = false;
2857 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2858 if (!adev->ip_blocks[i].status.late_initialized)
2860 if (adev->ip_blocks[i].version->funcs->late_fini)
2861 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2862 adev->ip_blocks[i].status.late_initialized = false;
2865 amdgpu_ras_fini(adev);
2871 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2873 * @work: work_struct.
2875 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2877 struct amdgpu_device *adev =
2878 container_of(work, struct amdgpu_device, delayed_init_work.work);
2881 r = amdgpu_ib_ring_tests(adev);
2883 DRM_ERROR("ib ring test failed (%d).\n", r);
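/*
 * Delayed-work handler that actually enters GFXOFF: it is expected to run
 * only once gfx_off_req_count has dropped to zero (hence the WARN_ON_ONCE
 * checks below) and then asks the SMU to power-gate the GFX block.
 */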
2886 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2888 struct amdgpu_device *adev =
2889 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2891 WARN_ON_ONCE(adev->gfx.gfx_off_state);
2892 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2894 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2895 adev->gfx.gfx_off_state = true;
2899 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2901 * @adev: amdgpu_device pointer
2903 * Main suspend function for hardware IPs. The list of all the hardware
2904 * IPs that make up the asic is walked, clockgating is disabled and the
2905 * suspend callbacks are run. suspend puts the hardware and software state
2906 * in each IP into a state suitable for suspend.
2907 * Returns 0 on success, negative error code on failure.
2909 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2913 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2914 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2916 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2917 if (!adev->ip_blocks[i].status.valid)
2920 /* displays are handled separately */
2921 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2925 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2926 /* XXX handle errors */
2928 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2929 adev->ip_blocks[i].version->funcs->name, r);
2933 adev->ip_blocks[i].status.hw = false;
2940 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2942 * @adev: amdgpu_device pointer
2944 * Main suspend function for hardware IPs. The list of all the hardware
2945 * IPs that make up the asic is walked, clockgating is disabled and the
2946 * suspend callbacks are run. suspend puts the hardware and software state
2947 * in each IP into a state suitable for suspend.
2948 * Returns 0 on success, negative error code on failure.
2950 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2955 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2957 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2958 if (!adev->ip_blocks[i].status.valid)
2960 /* displays are handled in phase1 */
2961 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2963 /* PSP lost connection when err_event_athub occurs */
2964 if (amdgpu_ras_intr_triggered() &&
2965 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2966 adev->ip_blocks[i].status.hw = false;
2970 /* skip unnecessary suspend if we have not initialized them yet */
2971 if (adev->gmc.xgmi.pending_reset &&
2972 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2973 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2974 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2975 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2976 adev->ip_blocks[i].status.hw = false;
2980 /* skip suspend of gfx and psp for S0ix
2981 * gfx is in gfxoff state, so on resume it will exit gfxoff just
2982 * like at runtime. PSP is also part of the always-on hardware
2983 * so no need to suspend it.
2985 if (adev->in_s0ix &&
2986 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2987 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2991 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2992 /* XXX handle errors */
2994 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2995 adev->ip_blocks[i].version->funcs->name, r);
2997 adev->ip_blocks[i].status.hw = false;
2998 /* handle putting the SMC in the appropriate state */
2999 if (!amdgpu_sriov_vf(adev)) {
3000 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3001 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3003 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3004 adev->mp1_state, r);
3015 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3017 * @adev: amdgpu_device pointer
3019 * Main suspend function for hardware IPs. The list of all the hardware
3020 * IPs that make up the asic is walked, clockgating is disabled and the
3021 * suspend callbacks are run. suspend puts the hardware and software state
3022 * in each IP into a state suitable for suspend.
3023 * Returns 0 on success, negative error code on failure.
3025 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3029 if (amdgpu_sriov_vf(adev)) {
3030 amdgpu_virt_fini_data_exchange(adev);
3031 amdgpu_virt_request_full_gpu(adev, false);
3034 r = amdgpu_device_ip_suspend_phase1(adev);
3037 r = amdgpu_device_ip_suspend_phase2(adev);
3039 if (amdgpu_sriov_vf(adev))
3040 amdgpu_virt_release_full_gpu(adev, false);
3045 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3049 static enum amd_ip_block_type ip_order[] = {
3050 AMD_IP_BLOCK_TYPE_GMC,
3051 AMD_IP_BLOCK_TYPE_COMMON,
3052 AMD_IP_BLOCK_TYPE_PSP,
3053 AMD_IP_BLOCK_TYPE_IH,
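/* Only the blocks a VF needs before the SMU/CP come back are re-initialized
 * here, in the fixed order given by ip_order above. */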
3056 for (i = 0; i < adev->num_ip_blocks; i++) {
3058 struct amdgpu_ip_block *block;
3060 block = &adev->ip_blocks[i];
3061 block->status.hw = false;
3063 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3065 if (block->version->type != ip_order[j] ||
3066 !block->status.valid)
3069 r = block->version->funcs->hw_init(adev);
3070 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3073 block->status.hw = true;
3080 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3084 static enum amd_ip_block_type ip_order[] = {
3085 AMD_IP_BLOCK_TYPE_SMC,
3086 AMD_IP_BLOCK_TYPE_DCE,
3087 AMD_IP_BLOCK_TYPE_GFX,
3088 AMD_IP_BLOCK_TYPE_SDMA,
3089 AMD_IP_BLOCK_TYPE_UVD,
3090 AMD_IP_BLOCK_TYPE_VCE,
3091 AMD_IP_BLOCK_TYPE_VCN
3094 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3096 struct amdgpu_ip_block *block;
3098 for (j = 0; j < adev->num_ip_blocks; j++) {
3099 block = &adev->ip_blocks[j];
3101 if (block->version->type != ip_order[i] ||
3102 !block->status.valid ||
3106 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3107 r = block->version->funcs->resume(adev);
3109 r = block->version->funcs->hw_init(adev);
3111 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3114 block->status.hw = true;
3122 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3124 * @adev: amdgpu_device pointer
3126 * First resume function for hardware IPs. The list of all the hardware
3127 * IPs that make up the asic is walked and the resume callbacks are run for
3128 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3129 * after a suspend and updates the software state as necessary. This
3130 * function is also used for restoring the GPU after a GPU reset.
3131 * Returns 0 on success, negative error code on failure.
3133 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3137 for (i = 0; i < adev->num_ip_blocks; i++) {
3138 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3140 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3141 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3142 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3144 r = adev->ip_blocks[i].version->funcs->resume(adev);
3146 DRM_ERROR("resume of IP block <%s> failed %d\n",
3147 adev->ip_blocks[i].version->funcs->name, r);
3150 adev->ip_blocks[i].status.hw = true;
3158 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3160 * @adev: amdgpu_device pointer
3162 * Second resume function for hardware IPs. The list of all the hardware
3163 * IPs that make up the asic is walked and the resume callbacks are run for
3164 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
3165 * functional state after a suspend and updates the software state as
3166 * necessary. This function is also used for restoring the GPU after a GPU reset.
3168 * Returns 0 on success, negative error code on failure.
3170 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3174 for (i = 0; i < adev->num_ip_blocks; i++) {
3175 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3177 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3178 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3179 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3180 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3182 r = adev->ip_blocks[i].version->funcs->resume(adev);
3184 DRM_ERROR("resume of IP block <%s> failed %d\n",
3185 adev->ip_blocks[i].version->funcs->name, r);
3188 adev->ip_blocks[i].status.hw = true;
3195 * amdgpu_device_ip_resume - run resume for hardware IPs
3197 * @adev: amdgpu_device pointer
3199 * Main resume function for hardware IPs. The hardware IPs
3200 * are split into two resume functions because they are
3201 * also used in recovering from a GPU reset, and some additional
3202 * steps need to be taken between them. In this case (S3/S4) they are run sequentially.
3204 * Returns 0 on success, negative error code on failure.
3206 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3210 r = amdgpu_amdkfd_resume_iommu(adev);
3214 r = amdgpu_device_ip_resume_phase1(adev);
3218 r = amdgpu_device_fw_loading(adev);
3222 r = amdgpu_device_ip_resume_phase2(adev);
3228 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3230 * @adev: amdgpu_device pointer
3232 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3234 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3236 if (amdgpu_sriov_vf(adev)) {
3237 if (adev->is_atom_fw) {
3238 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3239 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3241 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3242 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3245 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3246 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3251 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3253 * @asic_type: AMD asic type
3255 * Check if there is DC (new modesetting infrastructure) support for an asic.
3256 * Returns true if DC has support, false if not.
3258 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3260 switch (asic_type) {
3261 #ifdef CONFIG_DRM_AMDGPU_SI
3265 /* chips with no display hardware */
3267 #if defined(CONFIG_DRM_AMD_DC)
3273 * We have systems in the wild with these ASICs that require
3274 * LVDS and VGA support which is not supported with DC.
3276 * Fall back to the non-DC driver here by default so as not to
3277 * cause regressions.
3279 #if defined(CONFIG_DRM_AMD_DC_SI)
3280 return amdgpu_dc > 0;
3289 * We have systems in the wild with these ASICs that require
3290 * LVDS and VGA support which is not supported with DC.
3292 * Fall back to the non-DC driver here by default so as not to
3293 * cause regressions.
3295 return amdgpu_dc > 0;
3297 return amdgpu_dc != 0;
3301 DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3302 "but isn't supported by ASIC, ignoring\n");
3309 * amdgpu_device_has_dc_support - check if dc is supported
3311 * @adev: amdgpu_device pointer
3313 * Returns true for supported, false for not supported
3315 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3317 if (amdgpu_sriov_vf(adev) ||
3318 adev->enable_virtual_display ||
3319 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3322 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3325 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3327 struct amdgpu_device *adev =
3328 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3329 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3331 /* It's a bug to not have a hive within this function */
3336 * Use task barrier to synchronize all xgmi reset works across the
3337 * hive. task_barrier_enter and task_barrier_exit will block
3338 * until all the threads running the xgmi reset works reach
3339 * those points. task_barrier_full will do both blocks.
3341 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3343 task_barrier_enter(&hive->tb);
3344 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3346 if (adev->asic_reset_res)
3349 task_barrier_exit(&hive->tb);
3350 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3352 if (adev->asic_reset_res)
3355 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3356 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3357 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3360 task_barrier_full(&hive->tb);
3361 adev->asic_reset_res = amdgpu_asic_reset(adev);
3365 if (adev->asic_reset_res)
3366 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3367 adev->asic_reset_res, adev_to_drm(adev)->unique);
3368 amdgpu_put_xgmi_hive(hive);
3371 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3373 char *input = amdgpu_lockup_timeout;
3374 char *timeout_setting = NULL;
3380 * By default the timeout for non-compute jobs is 10000 ms
3381 * and 60000 ms for compute jobs.
3382 * In SR-IOV or passthrough mode, the timeout for compute
3383 * jobs is 60000 ms by default.
3385 adev->gfx_timeout = msecs_to_jiffies(10000);
3386 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3387 if (amdgpu_sriov_vf(adev))
3388 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3389 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3391 adev->compute_timeout = msecs_to_jiffies(60000);
3393 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3394 while ((timeout_setting = strsep(&input, ",")) &&
3395 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3396 ret = kstrtol(timeout_setting, 0, &timeout);
3403 } else if (timeout < 0) {
3404 timeout = MAX_SCHEDULE_TIMEOUT;
3405 dev_warn(adev->dev, "lockup timeout disabled");
3406 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3408 timeout = msecs_to_jiffies(timeout);
3413 adev->gfx_timeout = timeout;
3416 adev->compute_timeout = timeout;
3419 adev->sdma_timeout = timeout;
3422 adev->video_timeout = timeout;
3429 * There is only one value specified and
3430 * it should apply to all non-compute jobs.
3433 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3434 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3435 adev->compute_timeout = adev->gfx_timeout;
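/*
 * Illustrative example (values are arbitrary): booting with
 * amdgpu.lockup_timeout=10000,60000,10000,10000 sets the gfx, compute,
 * sdma and video timeouts (in ms) in that order, matching the
 * per-index assignments above.
 */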
3443 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3445 * @adev: amdgpu_device pointer
3447 * RAM is directly mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3449 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3451 struct iommu_domain *domain;
3453 domain = iommu_get_domain_for_dev(adev->dev);
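/* An identity domain means DMA is not remapped: bus addresses equal
 * physical addresses, so system RAM is effectively direct mapped. */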
3454 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3455 adev->ram_is_direct_mapped = true;
3458 static const struct attribute *amdgpu_dev_attributes[] = {
3459 &dev_attr_product_name.attr,
3460 &dev_attr_product_number.attr,
3461 &dev_attr_serial_number.attr,
3462 &dev_attr_pcie_replay_count.attr,
3467 * amdgpu_device_init - initialize the driver
3469 * @adev: amdgpu_device pointer
3470 * @flags: driver flags
3472 * Initializes the driver info and hw (all asics).
3473 * Returns 0 for success or an error on failure.
3474 * Called at driver startup.
3476 int amdgpu_device_init(struct amdgpu_device *adev,
3479 struct drm_device *ddev = adev_to_drm(adev);
3480 struct pci_dev *pdev = adev->pdev;
3485 adev->shutdown = false;
3486 adev->flags = flags;
3488 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3489 adev->asic_type = amdgpu_force_asic_type;
3491 adev->asic_type = flags & AMD_ASIC_MASK;
3493 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3494 if (amdgpu_emu_mode == 1)
3495 adev->usec_timeout *= 10;
3496 adev->gmc.gart_size = 512 * 1024 * 1024;
3497 adev->accel_working = false;
3498 adev->num_rings = 0;
3499 adev->mman.buffer_funcs = NULL;
3500 adev->mman.buffer_funcs_ring = NULL;
3501 adev->vm_manager.vm_pte_funcs = NULL;
3502 adev->vm_manager.vm_pte_num_scheds = 0;
3503 adev->gmc.gmc_funcs = NULL;
3504 adev->harvest_ip_mask = 0x0;
3505 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3506 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3508 adev->smc_rreg = &amdgpu_invalid_rreg;
3509 adev->smc_wreg = &amdgpu_invalid_wreg;
3510 adev->pcie_rreg = &amdgpu_invalid_rreg;
3511 adev->pcie_wreg = &amdgpu_invalid_wreg;
3512 adev->pciep_rreg = &amdgpu_invalid_rreg;
3513 adev->pciep_wreg = &amdgpu_invalid_wreg;
3514 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3515 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3516 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3517 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3518 adev->didt_rreg = &amdgpu_invalid_rreg;
3519 adev->didt_wreg = &amdgpu_invalid_wreg;
3520 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3521 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3522 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3523 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3525 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3526 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3527 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3529 /* mutex initialization is all done here so we
3530 * can recall functions without locking issues */
3531 mutex_init(&adev->firmware.mutex);
3532 mutex_init(&adev->pm.mutex);
3533 mutex_init(&adev->gfx.gpu_clock_mutex);
3534 mutex_init(&adev->srbm_mutex);
3535 mutex_init(&adev->gfx.pipe_reserve_mutex);
3536 mutex_init(&adev->gfx.gfx_off_mutex);
3537 mutex_init(&adev->grbm_idx_mutex);
3538 mutex_init(&adev->mn_lock);
3539 mutex_init(&adev->virt.vf_errors.lock);
3540 hash_init(adev->mn_hash);
3541 mutex_init(&adev->psp.mutex);
3542 mutex_init(&adev->notifier_lock);
3543 mutex_init(&adev->pm.stable_pstate_ctx_lock);
3544 mutex_init(&adev->benchmark_mutex);
3546 amdgpu_device_init_apu_flags(adev);
3548 r = amdgpu_device_check_arguments(adev);
3552 spin_lock_init(&adev->mmio_idx_lock);
3553 spin_lock_init(&adev->smc_idx_lock);
3554 spin_lock_init(&adev->pcie_idx_lock);
3555 spin_lock_init(&adev->uvd_ctx_idx_lock);
3556 spin_lock_init(&adev->didt_idx_lock);
3557 spin_lock_init(&adev->gc_cac_idx_lock);
3558 spin_lock_init(&adev->se_cac_idx_lock);
3559 spin_lock_init(&adev->audio_endpt_idx_lock);
3560 spin_lock_init(&adev->mm_stats.lock);
3562 INIT_LIST_HEAD(&adev->shadow_list);
3563 mutex_init(&adev->shadow_list_lock);
3565 INIT_LIST_HEAD(&adev->reset_list);
3567 INIT_LIST_HEAD(&adev->ras_list);
3569 INIT_DELAYED_WORK(&adev->delayed_init_work,
3570 amdgpu_device_delayed_init_work_handler);
3571 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3572 amdgpu_device_delay_enable_gfx_off);
3574 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
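/* Hold one GFXOFF disable request from the start; GFXOFF can only be
 * entered once this count is later dropped to zero (see
 * amdgpu_device_delay_enable_gfx_off() above). */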
3576 adev->gfx.gfx_off_req_count = 1;
3577 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3579 atomic_set(&adev->throttling_logging_enabled, 1);
3581 * If throttling continues, logging will be performed every minute
3582 * to avoid log flooding. "-1" is subtracted since the thermal
3583 * throttling interrupt comes every second. Thus, the total logging
3584 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3585 * for throttling interrupt) = 60 seconds.
3587 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3588 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3590 /* Registers mapping */
3591 /* TODO: block userspace mapping of io register */
3592 if (adev->asic_type >= CHIP_BONAIRE) {
3593 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3594 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3596 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3597 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3600 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3601 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3603 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3604 if (adev->rmmio == NULL) {
3607 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3608 DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
3610 amdgpu_device_get_pcie_info(adev);
3613 DRM_INFO("MCBP is enabled\n");
3615 if (adev->asic_type >= CHIP_NAVI10) {
3616 if (amdgpu_mes || amdgpu_mes_kiq)
3617 adev->enable_mes = true;
3620 adev->enable_mes_kiq = true;
3624 * The reset domain needs to be present early, before the XGMI hive (if
3625 * any) is discovered and initialized, so the reset sem and in-gpu-reset
3626 * flag can be used early on during init and before calling RREG32.
3628 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3629 if (!adev->reset_domain)
3632 /* detect hw virtualization here */
3633 amdgpu_detect_virtualization(adev);
3635 r = amdgpu_device_get_job_timeout_settings(adev);
3637 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3641 /* early init functions */
3642 r = amdgpu_device_ip_early_init(adev);
3646 /* Enable TMZ based on IP_VERSION */
3647 amdgpu_gmc_tmz_set(adev);
3649 amdgpu_gmc_noretry_set(adev);
3650 /* Need to get xgmi info early to decide the reset behavior */
3651 if (adev->gmc.xgmi.supported) {
3652 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3657 /* enable PCIE atomic ops */
3658 if (amdgpu_sriov_vf(adev))
3659 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3660 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3661 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3663 adev->have_atomics_support =
3664 !pci_enable_atomic_ops_to_root(adev->pdev,
3665 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3666 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
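/* pci_enable_atomic_ops_to_root() returns 0 on success, so the negation
 * yields true when both 32- and 64-bit atomic completion are usable. */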
3667 if (!adev->have_atomics_support)
3668 dev_info(adev->dev, "PCIe atomic ops are not supported\n");
3670 /* doorbell bar mapping and doorbell index init*/
3671 amdgpu_device_doorbell_init(adev);
3673 if (amdgpu_emu_mode == 1) {
3674 /* post the asic on emulation mode */
3675 emu_soc_asic_init(adev);
3676 goto fence_driver_init;
3679 amdgpu_reset_init(adev);
3681 /* detect if we have an SR-IOV vbios */
3682 amdgpu_device_detect_sriov_bios(adev);
3684 /* check if we need to reset the asic
3685 * E.g., driver was not cleanly unloaded previously, etc.
3687 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3688 if (adev->gmc.xgmi.num_physical_nodes) {
3689 dev_info(adev->dev, "Pending hive reset.\n");
3690 adev->gmc.xgmi.pending_reset = true;
3691 /* Only init the necessary blocks for SMU to handle the reset */
3692 for (i = 0; i < adev->num_ip_blocks; i++) {
3693 if (!adev->ip_blocks[i].status.valid)
3695 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3696 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3697 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3698 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3699 DRM_DEBUG("IP %s disabled for hw_init.\n",
3700 adev->ip_blocks[i].version->funcs->name);
3701 adev->ip_blocks[i].status.hw = true;
3705 r = amdgpu_asic_reset(adev);
3707 dev_err(adev->dev, "asic reset on init failed\n");
3713 pci_enable_pcie_error_reporting(adev->pdev);
3715 /* Post card if necessary */
3716 if (amdgpu_device_need_post(adev)) {
3718 dev_err(adev->dev, "no vBIOS found\n");
3722 DRM_INFO("GPU posting now...\n");
3723 r = amdgpu_device_asic_init(adev);
3725 dev_err(adev->dev, "gpu post error!\n");
3730 if (adev->is_atom_fw) {
3731 /* Initialize clocks */
3732 r = amdgpu_atomfirmware_get_clock_info(adev);
3734 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3735 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3739 /* Initialize clocks */
3740 r = amdgpu_atombios_get_clock_info(adev);
3742 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3743 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3746 /* init i2c buses */
3747 if (!amdgpu_device_has_dc_support(adev))
3748 amdgpu_atombios_i2c_init(adev);
3753 r = amdgpu_fence_driver_sw_init(adev);
3755 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3756 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3760 /* init the mode config */
3761 drm_mode_config_init(adev_to_drm(adev));
3763 r = amdgpu_device_ip_init(adev);
3765 /* failed in exclusive mode due to timeout */
3766 if (amdgpu_sriov_vf(adev) &&
3767 !amdgpu_sriov_runtime(adev) &&
3768 amdgpu_virt_mmio_blocked(adev) &&
3769 !amdgpu_virt_wait_reset(adev)) {
3770 dev_err(adev->dev, "VF exclusive mode timeout\n");
3771 /* Don't send request since VF is inactive. */
3772 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3773 adev->virt.ops = NULL;
3775 goto release_ras_con;
3777 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3778 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3779 goto release_ras_con;
3782 amdgpu_fence_driver_hw_init(adev);
3785 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3786 adev->gfx.config.max_shader_engines,
3787 adev->gfx.config.max_sh_per_se,
3788 adev->gfx.config.max_cu_per_sh,
3789 adev->gfx.cu_info.number);
3791 adev->accel_working = true;
3793 amdgpu_vm_check_compute_bug(adev);
3795 /* Initialize the buffer migration limit. */
3796 if (amdgpu_moverate >= 0)
3797 max_MBps = amdgpu_moverate;
3799 max_MBps = 8; /* Allow 8 MB/s. */
3800 /* Get a log2 for easy divisions. */
3801 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
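/* e.g. the default max_MBps = 8 gives log2_max_MBps = 3, so byte counts
 * can be scaled by this rate with shifts instead of divisions. */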
3803 r = amdgpu_pm_sysfs_init(adev);
3805 adev->pm_sysfs_en = false;
3806 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3808 adev->pm_sysfs_en = true;
3810 r = amdgpu_ucode_sysfs_init(adev);
3812 adev->ucode_sysfs_en = false;
3813 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3815 adev->ucode_sysfs_en = true;
3817 r = amdgpu_psp_sysfs_init(adev);
3819 adev->psp_sysfs_en = false;
3820 if (!amdgpu_sriov_vf(adev))
3821 DRM_ERROR("Creating psp sysfs failed\n");
3823 adev->psp_sysfs_en = true;
3826 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3827 * Otherwise the mgpu fan boost feature will be skipped because the
3828 * gpu instance count is too low.
3830 amdgpu_register_gpu_instance(adev);
3832 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3833 * explicit gating rather than handling it automatically.
3835 if (!adev->gmc.xgmi.pending_reset) {
3836 r = amdgpu_device_ip_late_init(adev);
3838 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3839 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3840 goto release_ras_con;
3843 amdgpu_ras_resume(adev);
3844 queue_delayed_work(system_wq, &adev->delayed_init_work,
3845 msecs_to_jiffies(AMDGPU_RESUME_MS));
3848 if (amdgpu_sriov_vf(adev))
3849 flush_delayed_work(&adev->delayed_init_work);
3851 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3853 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3855 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3856 r = amdgpu_pmu_init(adev);
3858 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3860 /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
3861 if (amdgpu_device_cache_pci_state(adev->pdev))
3862 pci_restore_state(pdev);
3864 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3865 /* this will fail for cards that aren't VGA class devices; just ignore it */
3867 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3868 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3870 if (amdgpu_device_supports_px(ddev)) {
3872 vga_switcheroo_register_client(adev->pdev,
3873 &amdgpu_switcheroo_ops, px);
3874 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3877 if (adev->gmc.xgmi.pending_reset)
3878 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3879 msecs_to_jiffies(AMDGPU_RESUME_MS));
3881 amdgpu_device_check_iommu_direct_map(adev);
3886 amdgpu_release_ras_context(adev);
3889 amdgpu_vf_error_trans_all(adev);
3894 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3897 /* Clear all CPU mappings pointing to this device */
3898 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3900 /* Unmap all mapped bars - Doorbell, registers and VRAM */
3901 amdgpu_device_doorbell_fini(adev);
3903 iounmap(adev->rmmio);
3905 if (adev->mman.aper_base_kaddr)
3906 iounmap(adev->mman.aper_base_kaddr);
3907 adev->mman.aper_base_kaddr = NULL;
3909 /* Memory manager related */
3910 if (!adev->gmc.xgmi.connected_to_cpu) {
3911 arch_phys_wc_del(adev->gmc.vram_mtrr);
3912 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3917 * amdgpu_device_fini_hw - tear down the driver
3919 * @adev: amdgpu_device pointer
3921 * Tear down the driver info (all asics).
3922 * Called at driver shutdown.
3924 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3926 dev_info(adev->dev, "amdgpu: finishing device.\n");
3927 flush_delayed_work(&adev->delayed_init_work);
3928 adev->shutdown = true;
3930 /* make sure IB tests have finished before entering exclusive mode
3931 * to avoid preemption during IB tests
3933 if (amdgpu_sriov_vf(adev)) {
3934 amdgpu_virt_request_full_gpu(adev, false);
3935 amdgpu_virt_fini_data_exchange(adev);
3938 /* disable all interrupts */
3939 amdgpu_irq_disable_all(adev);
3940 if (adev->mode_info.mode_config_initialized) {
3941 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3942 drm_helper_force_disable_all(adev_to_drm(adev));
3944 drm_atomic_helper_shutdown(adev_to_drm(adev));
3946 amdgpu_fence_driver_hw_fini(adev);
3948 if (adev->mman.initialized) {
3949 flush_delayed_work(&adev->mman.bdev.wq);
3950 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3953 if (adev->pm_sysfs_en)
3954 amdgpu_pm_sysfs_fini(adev);
3955 if (adev->ucode_sysfs_en)
3956 amdgpu_ucode_sysfs_fini(adev);
3957 if (adev->psp_sysfs_en)
3958 amdgpu_psp_sysfs_fini(adev);
3959 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3961 /* RAS features must be disabled before hw fini */
3962 amdgpu_ras_pre_fini(adev);
3964 amdgpu_device_ip_fini_early(adev);
3966 amdgpu_irq_fini_hw(adev);
3968 if (adev->mman.initialized)
3969 ttm_device_clear_dma_mappings(&adev->mman.bdev);
3971 amdgpu_gart_dummy_page_fini(adev);
3973 if (drm_dev_is_unplugged(adev_to_drm(adev)))
3974 amdgpu_device_unmap_mmio(adev);
3978 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3982 amdgpu_fence_driver_sw_fini(adev);
3983 amdgpu_device_ip_fini(adev);
3984 release_firmware(adev->firmware.gpu_info_fw);
3985 adev->firmware.gpu_info_fw = NULL;
3986 adev->accel_working = false;
3988 amdgpu_reset_fini(adev);
3990 /* free i2c buses */
3991 if (!amdgpu_device_has_dc_support(adev))
3992 amdgpu_i2c_fini(adev);
3994 if (amdgpu_emu_mode != 1)
3995 amdgpu_atombios_fini(adev);
3999 if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4000 vga_switcheroo_unregister_client(adev->pdev);
4001 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4003 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4004 vga_client_unregister(adev->pdev);
4006 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4008 iounmap(adev->rmmio);
4010 amdgpu_device_doorbell_fini(adev);
4014 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4015 amdgpu_pmu_fini(adev);
4016 if (adev->mman.discovery_bin)
4017 amdgpu_discovery_fini(adev);
4019 amdgpu_reset_put_reset_domain(adev->reset_domain);
4020 adev->reset_domain = NULL;
4022 kfree(adev->pci_state);
4027 * amdgpu_device_evict_resources - evict device resources
4028 * @adev: amdgpu device object
4030 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4031 * of the vram memory type. Mainly used for evicting device resources
4035 static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
4037 /* No need to evict vram on APUs for suspend to ram or s2idle */
4038 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4041 if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
4042 DRM_WARN("evicting device resources failed\n");
4050 * amdgpu_device_suspend - initiate device suspend
4052 * @dev: drm dev pointer
4053 * @fbcon: notify the fbdev of suspend
4055 * Puts the hw in the suspend state (all asics).
4056 * Returns 0 for success or an error on failure.
4057 * Called at driver suspend.
4059 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4061 struct amdgpu_device *adev = drm_to_adev(dev);
4063 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4066 adev->in_suspend = true;
4068 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4069 DRM_WARN("smart shift update failed\n");
4071 drm_kms_helper_poll_disable(dev);
4074 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4076 cancel_delayed_work_sync(&adev->delayed_init_work);
4078 amdgpu_ras_suspend(adev);
4080 amdgpu_device_ip_suspend_phase1(adev);
4083 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4085 amdgpu_device_evict_resources(adev);
4087 amdgpu_fence_driver_hw_fini(adev);
4089 amdgpu_device_ip_suspend_phase2(adev);
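/*
 * Minimal usage sketch (an assumption, not driver code): how PM callbacks
 * might drive the suspend/resume entry points in this file. The real
 * callbacks live in amdgpu_drv.c and handle many more cases.
 */
#if 0
static int example_pm_suspend(struct drm_device *dev)
{
	return amdgpu_device_suspend(dev, true);	/* fbcon = true */
}

static int example_pm_resume(struct drm_device *dev)
{
	return amdgpu_device_resume(dev, true);
}
#endif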
4095 * amdgpu_device_resume - initiate device resume
4097 * @dev: drm dev pointer
4098 * @fbcon: notify the fbdev of resume
4100 * Bring the hw back to operating state (all asics).
4101 * Returns 0 for success or an error on failure.
4102 * Called at driver resume.
4104 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4106 struct amdgpu_device *adev = drm_to_adev(dev);
4109 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4113 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4116 if (amdgpu_device_need_post(adev)) {
4117 r = amdgpu_device_asic_init(adev);
4119 dev_err(adev->dev, "amdgpu asic init failed\n");
4122 r = amdgpu_device_ip_resume(adev);
4124 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4127 amdgpu_fence_driver_hw_init(adev);
4129 r = amdgpu_device_ip_late_init(adev);
4133 queue_delayed_work(system_wq, &adev->delayed_init_work,
4134 msecs_to_jiffies(AMDGPU_RESUME_MS));
4136 if (!adev->in_s0ix) {
4137 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4142 /* Make sure IB tests are flushed */
4143 flush_delayed_work(&adev->delayed_init_work);
4146 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4148 drm_kms_helper_poll_enable(dev);
4150 amdgpu_ras_resume(adev);
4153 * Most of the connector probing functions try to acquire runtime pm
4154 * refs to ensure that the GPU is powered on when connector polling is
4155 * performed. Since we're calling this from a runtime PM callback,
4156 * trying to acquire rpm refs will cause us to deadlock.
4158 * Since we're guaranteed to be holding the rpm lock, it's safe to
4159 * temporarily disable the rpm helpers so this doesn't deadlock us.
4162 dev->dev->power.disable_depth++;
4164 if (!amdgpu_device_has_dc_support(adev))
4165 drm_helper_hpd_irq_event(dev);
4167 drm_kms_helper_hotplug_event(dev);
4169 dev->dev->power.disable_depth--;
4171 adev->in_suspend = false;
4173 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4174 DRM_WARN("smart shift update failed\n");
4180 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4182 * @adev: amdgpu_device pointer
4184 * The list of all the hardware IPs that make up the asic is walked and
4185 * the check_soft_reset callbacks are run. check_soft_reset determines
4186 * if the asic is still hung or not.
4187 * Returns true if any of the IPs are still in a hung state, false if not.
4189 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4192 bool asic_hang = false;
4194 if (amdgpu_sriov_vf(adev))
4197 if (amdgpu_asic_need_full_reset(adev))
4200 for (i = 0; i < adev->num_ip_blocks; i++) {
4201 if (!adev->ip_blocks[i].status.valid)
4203 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4204 adev->ip_blocks[i].status.hang =
4205 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4206 if (adev->ip_blocks[i].status.hang) {
4207 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4215 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4217 * @adev: amdgpu_device pointer
4219 * The list of all the hardware IPs that make up the asic is walked and the
4220 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4221 * handles any IP specific hardware or software state changes that are
4222 * necessary for a soft reset to succeed.
4223 * Returns 0 on success, negative error code on failure.
4225 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4229 for (i = 0; i < adev->num_ip_blocks; i++) {
4230 if (!adev->ip_blocks[i].status.valid)
4232 if (adev->ip_blocks[i].status.hang &&
4233 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4234 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4244 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4246 * @adev: amdgpu_device pointer
4248 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4249 * reset is necessary to recover.
4250 * Returns true if a full asic reset is required, false if not.
4252 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4256 if (amdgpu_asic_need_full_reset(adev))
4259 for (i = 0; i < adev->num_ip_blocks; i++) {
4260 if (!adev->ip_blocks[i].status.valid)
4262 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4263 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4264 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4265 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4266 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4267 if (adev->ip_blocks[i].status.hang) {
4268 dev_info(adev->dev, "Some blocks need a full reset!\n");
4277 * amdgpu_device_ip_soft_reset - do a soft reset
4279 * @adev: amdgpu_device pointer
4281 * The list of all the hardware IPs that make up the asic is walked and the
4282 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4283 * IP specific hardware or software state changes that are necessary to soft
4285 * Returns 0 on success, negative error code on failure.
4287 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4291 for (i = 0; i < adev->num_ip_blocks; i++) {
4292 if (!adev->ip_blocks[i].status.valid)
4294 if (adev->ip_blocks[i].status.hang &&
4295 adev->ip_blocks[i].version->funcs->soft_reset) {
4296 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4306 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4308 * @adev: amdgpu_device pointer
4310 * The list of all the hardware IPs that make up the asic is walked and the
4311 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4312 * handles any IP specific hardware or software state changes that are
4313 * necessary after the IP has been soft reset.
4314 * Returns 0 on success, negative error code on failure.
4316 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4320 for (i = 0; i < adev->num_ip_blocks; i++) {
4321 if (!adev->ip_blocks[i].status.valid)
4323 if (adev->ip_blocks[i].status.hang &&
4324 adev->ip_blocks[i].version->funcs->post_soft_reset)
4325 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4334 * amdgpu_device_recover_vram - Recover some VRAM contents
4336 * @adev: amdgpu_device pointer
4338 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4339 * restore things like GPUVM page tables after a GPU reset where
4340 * the contents of VRAM might be lost.
4343 * 0 on success, negative error code on failure.
4345 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4347 struct dma_fence *fence = NULL, *next = NULL;
4348 struct amdgpu_bo *shadow;
4349 struct amdgpu_bo_vm *vmbo;
4352 if (amdgpu_sriov_runtime(adev))
4353 tmo = msecs_to_jiffies(8000);
4355 tmo = msecs_to_jiffies(100);
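/* tmo acts as a shared budget: dma_fence_wait_timeout() returns the
 * jiffies remaining, which is carried into the next wait below. */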
4357 dev_info(adev->dev, "recover vram bo from shadow start\n");
4358 mutex_lock(&adev->shadow_list_lock);
4359 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4361 /* No need to recover an evicted BO */
4362 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4363 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4364 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4367 r = amdgpu_bo_restore_shadow(shadow, &next);
4372 tmo = dma_fence_wait_timeout(fence, false, tmo);
4373 dma_fence_put(fence);
4378 } else if (tmo < 0) {
4386 mutex_unlock(&adev->shadow_list_lock);
4389 tmo = dma_fence_wait_timeout(fence, false, tmo);
4390 dma_fence_put(fence);
4392 if (r < 0 || tmo <= 0) {
4393 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4397 dev_info(adev->dev, "recover vram bo from shadow done\n");
4403 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4405 * @adev: amdgpu_device pointer
4406 * @from_hypervisor: request from hypervisor
4408 * Do a VF FLR and reinitialize the ASIC.
4409 * Returns 0 if it succeeded, otherwise an error code.
4411 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4412 bool from_hypervisor)
4415 struct amdgpu_hive_info *hive = NULL;
4416 int retry_limit = 0;
4419 amdgpu_amdkfd_pre_reset(adev);
4423 if (from_hypervisor)
4424 r = amdgpu_virt_request_full_gpu(adev, true);
4426 r = amdgpu_virt_reset_gpu(adev);
4430 /* Resume IP prior to SMC */
4431 r = amdgpu_device_ip_reinit_early_sriov(adev);
4432 if (r)
4433 goto error;
4435 amdgpu_virt_init_data_exchange(adev);
4437 r = amdgpu_device_fw_loading(adev);
4438 if (r)
4439 return r;
4441 /* now we are okay to resume SMC/CP/SDMA */
4442 r = amdgpu_device_ip_reinit_late_sriov(adev);
4443 if (r)
4444 goto error;
4446 hive = amdgpu_get_xgmi_hive(adev);
4447 /* Update PSP FW topology after reset */
4448 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4449 r = amdgpu_xgmi_update_topology(hive, adev);
4451 if (hive)
4452 amdgpu_put_xgmi_hive(hive);
4454 if (!r) {
4455 amdgpu_irq_gpu_reset_resume_helper(adev);
4456 r = amdgpu_ib_ring_tests(adev);
4458 amdgpu_amdkfd_post_reset(adev);
4459 }
4461 error:
4462 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4463 amdgpu_inc_vram_lost(adev);
4464 r = amdgpu_device_recover_vram(adev);
4466 amdgpu_virt_release_full_gpu(adev, true);
4468 if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4469 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4470 retry_limit++;
4471 goto retry;
4472 } else
4473 DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4480 * amdgpu_device_has_job_running - check if there is any job in mirror list
4482 * @adev: amdgpu_device pointer
4484 * Check if there is any job in the mirror list; returns true if so.
4486 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4489 struct drm_sched_job *job;
4491 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4492 struct amdgpu_ring *ring = adev->rings[i];
4494 if (!ring || !ring->sched.thread)
4495 continue;
4497 spin_lock(&ring->sched.job_list_lock);
4498 job = list_first_entry_or_null(&ring->sched.pending_list,
4499 struct drm_sched_job, list);
4500 spin_unlock(&ring->sched.job_list_lock);
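/* A non-NULL first entry means this ring still has an unfinished job
 * pending, in which case the function reports true. */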
4508 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4510 * @adev: amdgpu_device pointer
4512 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover the ASIC.
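* (amdgpu_gpu_recovery as used here: 0 disables recovery, -1 selects the
* per-ASIC default, and 2 additionally enables the guilty-job recheck in
* amdgpu_device_recheck_guilty_jobs().)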
4515 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4517 if (!amdgpu_device_ip_check_soft_reset(adev)) {
4518 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4519 return false;
4520 }
4522 if (amdgpu_gpu_recovery == 0)
4523 goto disabled;
4525 if (amdgpu_sriov_vf(adev))
4526 return true;
4528 if (amdgpu_gpu_recovery == -1) {
4529 switch (adev->asic_type) {
4530 #ifdef CONFIG_DRM_AMDGPU_SI
4537 #ifdef CONFIG_DRM_AMDGPU_CIK
4544 case CHIP_CYAN_SKILLFISH:
4553 disabled:
4554 dev_info(adev->dev, "GPU recovery disabled.\n");
4555 return false;
4556 }
4558 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4563 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4565 dev_info(adev->dev, "GPU mode1 reset\n");
4568 pci_clear_master(adev->pdev);
4570 amdgpu_device_cache_pci_state(adev->pdev);
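/* Mode1 reset wipes PCI config space, so cache it here and restore it
 * with amdgpu_device_load_pci_state() once the reset is done. */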
4572 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4573 dev_info(adev->dev, "GPU smu mode1 reset\n");
4574 ret = amdgpu_dpm_mode1_reset(adev);
4575 } else {
4576 dev_info(adev->dev, "GPU psp mode1 reset\n");
4577 ret = psp_gpu_reset(adev);
4578 }
4580 if (ret)
4581 dev_err(adev->dev, "GPU mode1 reset failed\n");
4583 amdgpu_device_load_pci_state(adev->pdev);
4585 /* wait for asic to come out of reset */
4586 for (i = 0; i < adev->usec_timeout; i++) {
4587 u32 memsize = adev->nbio.funcs->get_memsize(adev);
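/* Reads of 0xffffffff typically mean the device is still in reset and
 * not responding; any other value indicates the ASIC is back. */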
4589 if (memsize != 0xffffffff)
4590 break;
4591 udelay(1);
4592 }
4594 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4598 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4599 struct amdgpu_reset_context *reset_context)
4602 struct amdgpu_job *job = NULL;
4603 bool need_full_reset =
4604 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4606 if (reset_context->reset_req_dev == adev)
4607 job = reset_context->job;
4609 if (amdgpu_sriov_vf(adev)) {
4610 /* stop the data exchange thread */
4611 amdgpu_virt_fini_data_exchange(adev);
4614 /* block all schedulers and reset given job's ring */
4615 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4616 struct amdgpu_ring *ring = adev->rings[i];
4618 if (!ring || !ring->sched.thread)
4619 continue;
4621 /* Clear job fences from the fence driver so force_completion does not
4622 * touch them; only NULL and VM flush fences remain in the fence drv */
4623 amdgpu_fence_driver_clear_job_fences(ring);
4625 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4626 amdgpu_fence_driver_force_completion(ring);
4630 drm_sched_increase_karma(&job->base);
4632 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4633 /* If reset handler not implemented, continue; otherwise return */
4634 if (r == -ENOSYS)
4635 r = 0;
4636 else
4637 return r;
4639 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4640 if (!amdgpu_sriov_vf(adev)) {
4642 if (!need_full_reset)
4643 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4645 if (!need_full_reset) {
4646 amdgpu_device_ip_pre_soft_reset(adev);
4647 r = amdgpu_device_ip_soft_reset(adev);
4648 amdgpu_device_ip_post_soft_reset(adev);
4649 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4650 dev_info(adev->dev, "soft reset failed, will fall back to full reset!\n");
4651 need_full_reset = true;
4655 if (need_full_reset)
4656 r = amdgpu_device_ip_suspend(adev);
4657 if (need_full_reset)
4658 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4659 else
4660 clear_bit(AMDGPU_NEED_FULL_RESET,
4661 &reset_context->flags);
4667 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4672 lockdep_assert_held(&adev->reset_domain->sem);
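/* Dump the user-selected reset registers; each value is emitted as a
 * trace event so post-mortem tooling can correlate register state with
 * this reset. */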
4675 for (i = 0; i < adev->num_regs; i++) {
4676 reg_value = RREG32(adev->reset_dump_reg_list[i]);
4677 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], reg_value);
4683 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4684 struct amdgpu_reset_context *reset_context)
4686 struct amdgpu_device *tmp_adev = NULL;
4687 bool need_full_reset, skip_hw_reset, vram_lost = false;
4690 /* Try reset handler method first */
4691 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4692 reset_list);
4693 amdgpu_reset_reg_dumps(tmp_adev);
4694 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4695 /* If reset handler not implemented, continue; otherwise return */
4696 if (r == -ENOSYS)
4697 r = 0;
4698 else
4699 return r;
4701 /* Reset handler not implemented, use the default method */
4702 need_full_reset =
4703 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4704 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4707 * ASIC reset has to be done on all XGMI hive nodes ASAP
4708 * to allow proper link negotiation in the FW (within 1 sec)
4710 if (!skip_hw_reset && need_full_reset) {
4711 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4712 /* For XGMI run all resets in parallel to speed up the process */
4713 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4714 tmp_adev->gmc.xgmi.pending_reset = false;
4715 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4716 r = -EALREADY;
4717 } else
4718 r = amdgpu_asic_reset(tmp_adev);
4720 if (r) {
4721 dev_err(tmp_adev->dev, "ASIC reset failed with error %d for drm dev %s",
4722 r, adev_to_drm(tmp_adev)->unique);
4727 /* For XGMI wait for all resets to complete before proceeding */
4729 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4730 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4731 flush_work(&tmp_adev->xgmi_reset_work);
4732 r = tmp_adev->asic_reset_res;
4740 if (!r && amdgpu_ras_intr_triggered()) {
4741 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4742 if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4743 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4744 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4747 amdgpu_ras_intr_cleared();
4750 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4751 if (need_full_reset) {
4753 r = amdgpu_device_asic_init(tmp_adev);
4754 if (r)
4755 dev_warn(tmp_adev->dev, "asic atom init failed!");
4757 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4758 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4762 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4766 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4767 if (vram_lost) {
4768 DRM_INFO("VRAM is lost due to GPU reset!\n");
4769 amdgpu_inc_vram_lost(tmp_adev);
4772 r = amdgpu_device_fw_loading(tmp_adev);
4776 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4781 amdgpu_device_fill_reset_magic(tmp_adev);
4784 * Add this ASIC back as tracked now that the reset
4785 * completed successfully.
4787 amdgpu_register_gpu_instance(tmp_adev);
4789 if (!reset_context->hive &&
4790 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4791 amdgpu_xgmi_add_device(tmp_adev);
4793 r = amdgpu_device_ip_late_init(tmp_adev);
4797 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4800 * The GPU enters a bad state once the number of faulty pages
4801 * reported by ECC reaches the threshold, and RAS recovery is
4802 * scheduled next. So add a check here to abort recovery if the
4803 * bad page threshold has indeed been exceeded, and remind the
4804 * user to retire this GPU or set a bigger bad_page_threshold
4805 * value when probing the driver again.
4809 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4811 amdgpu_ras_resume(tmp_adev);
4817 /* Update PSP FW topology after reset */
4818 if (reset_context->hive &&
4819 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4820 r = amdgpu_xgmi_update_topology(
4821 reset_context->hive, tmp_adev);
4827 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4828 r = amdgpu_ib_ring_tests(tmp_adev);
4829 if (r) {
4830 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4831 need_full_reset = true;
4838 r = amdgpu_device_recover_vram(tmp_adev);
4840 tmp_adev->asic_reset_res = r;
4844 if (need_full_reset)
4845 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4846 else
4847 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4851 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
4854 switch (amdgpu_asic_reset_method(adev)) {
4855 case AMD_RESET_METHOD_MODE1:
4856 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4857 break;
4858 case AMD_RESET_METHOD_MODE2:
4859 adev->mp1_state = PP_MP1_STATE_RESET;
4860 break;
4861 default:
4862 adev->mp1_state = PP_MP1_STATE_NONE;
4863 break;
4867 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
4869 amdgpu_vf_error_trans_all(adev);
4870 adev->mp1_state = PP_MP1_STATE_NONE;
4873 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4875 struct pci_dev *p = NULL;
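/* Function 1 at the GPU's domain/bus/slot is the HDMI/DP audio device. */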
4877 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4878 adev->pdev->bus->number, 1);
4880 pm_runtime_enable(&(p->dev));
4881 pm_runtime_resume(&(p->dev));
4885 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4887 enum amd_reset_method reset_method;
4888 struct pci_dev *p = NULL;
4892 * For now, only BACO and mode1 reset are confirmed to
4893 * suffer the audio issue if the device is not properly suspended.
4895 reset_method = amdgpu_asic_reset_method(adev);
4896 if ((reset_method != AMD_RESET_METHOD_BACO) &&
4897 (reset_method != AMD_RESET_METHOD_MODE1))
4900 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4901 adev->pdev->bus->number, 1);
4902 if (!p)
4903 return -ENODEV;
4905 expires = pm_runtime_autosuspend_expiration(&(p->dev));
4908 * If we cannot get the audio device autosuspend delay,
4909 * a fixed 4s interval is used. Since 3s is the audio
4910 * controller's default autosuspend delay, the 4s used
4911 * here is guaranteed to cover it.
4913 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4915 while (!pm_runtime_status_suspended(&(p->dev))) {
4916 if (!pm_runtime_suspend(&(p->dev)))
4917 break;
4919 if (expires < ktime_get_mono_fast_ns()) {
4920 dev_warn(adev->dev, "failed to suspend display audio\n");
4921 /* TODO: abort the subsequent GPU reset? */
4926 pm_runtime_disable(&(p->dev));
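/* Leave runtime PM disabled so the audio device stays suspended for the
 * whole reset; amdgpu_device_resume_display_audio() re-enables it. */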
4931 static void amdgpu_device_recheck_guilty_jobs(
4932 struct amdgpu_device *adev, struct list_head *device_list_handle,
4933 struct amdgpu_reset_context *reset_context)
4937 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4938 struct amdgpu_ring *ring = adev->rings[i];
4940 struct drm_sched_job *s_job;
4942 if (!ring || !ring->sched.thread)
4943 continue;
4945 s_job = list_first_entry_or_null(&ring->sched.pending_list,
4946 struct drm_sched_job, list);
4947 if (s_job == NULL)
4948 continue;
4950 /* Clear the job's guilty status and rely on the following step to decide the real one */
4951 drm_sched_reset_karma(s_job);
4952 /* The real bad job will be resubmitted twice; add a dma_fence_get
4953 * here to make sure the fence refcount stays balanced */
4954 dma_fence_get(s_job->s_fence->parent);
4955 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
4957 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
4958 if (ret == 0) { /* timeout */
4959 DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
4960 ring->sched.name, s_job->id);
4963 drm_sched_increase_karma(s_job);
4966 if (amdgpu_sriov_vf(adev)) {
4967 amdgpu_virt_fini_data_exchange(adev);
4968 r = amdgpu_device_reset_sriov(adev, false);
4969 if (r)
4970 adev->asic_reset_res = r;
4972 clear_bit(AMDGPU_SKIP_HW_RESET,
4973 &reset_context->flags);
4974 r = amdgpu_do_asic_reset(device_list_handle,
4975 reset_context);
4976 if (r && r == -EAGAIN)
4977 goto retry;
4981 * bump the reset counter so that the following
4982 * resubmitted job can flush its VMID
4984 atomic_inc(&adev->gpu_reset_counter);
4988 /* got the hw fence, signal finished fence */
4989 atomic_dec(ring->sched.score);
4990 dma_fence_put(s_job->s_fence->parent);
4991 dma_fence_get(&s_job->s_fence->finished);
4992 dma_fence_signal(&s_job->s_fence->finished);
4993 dma_fence_put(&s_job->s_fence->finished);
4995 /* remove node from list and free the job */
4996 spin_lock(&ring->sched.job_list_lock);
4997 list_del_init(&s_job->list);
4998 spin_unlock(&ring->sched.job_list_lock);
4999 ring->sched.ops->free_job(s_job);
5004 * amdgpu_device_gpu_recover_imp - reset the asic and recover scheduler
5006 * @adev: amdgpu_device pointer
5007 * @job: which job trigger hang
5009 * Attempt to reset the GPU if it has hung (all ASICs):
5010 * try a soft reset or a full reset and reinitialize the ASIC.
5011 * Returns 0 for success or an error on failure.
5014 int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
5015 struct amdgpu_job *job)
5017 struct list_head device_list, *device_list_handle = NULL;
5018 bool job_signaled = false;
5019 struct amdgpu_hive_info *hive = NULL;
5020 struct amdgpu_device *tmp_adev = NULL;
5022 bool need_emergency_restart = false;
5023 bool audio_suspended = false;
5024 int tmp_vram_lost_counter;
5025 struct amdgpu_reset_context reset_context;
5027 memset(&reset_context, 0, sizeof(reset_context));
5030 * Special case: RAS triggered and full reset isn't supported
5032 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5035 * Flush RAM to disk so that after reboot
5036 * the user can read the log and see why the system rebooted.
5038 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5039 DRM_WARN("Emergency reboot.");
5041 ksys_sync_helper();
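/* emergency_restart() reboots immediately and does not return; it skips
 * the normal filesystem sync, which is why the explicit sync above
 * matters. */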
5042 emergency_restart();
5045 dev_info(adev->dev, "GPU %s begin!\n",
5046 need_emergency_restart ? "jobs stop":"reset");
5048 if (!amdgpu_sriov_vf(adev))
5049 hive = amdgpu_get_xgmi_hive(adev);
5050 if (hive)
5051 mutex_lock(&hive->hive_lock);
5053 reset_context.method = AMD_RESET_METHOD_NONE;
5054 reset_context.reset_req_dev = adev;
5055 reset_context.job = job;
5056 reset_context.hive = hive;
5057 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5060 * Build list of devices to reset.
5061 * If we are in XGMI hive mode, reorder the device list
5062 * so that adev is in the first position.
5064 INIT_LIST_HEAD(&device_list);
5065 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5066 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
5067 list_add_tail(&tmp_adev->reset_list, &device_list);
5068 if (!list_is_first(&adev->reset_list, &device_list))
5069 list_rotate_to_front(&adev->reset_list, &device_list);
5070 device_list_handle = &device_list;
5071 } else {
5072 list_add_tail(&adev->reset_list, &device_list);
5073 device_list_handle = &device_list;
5076 /* We need to lock reset domain only once both for XGMI and single device */
5077 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5078 reset_list);
5079 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
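/* All devices in an XGMI hive share one reset domain, so locking the
 * first entry's domain serializes recovery for the whole hive. */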
5081 /* block all schedulers and reset given job's ring */
5082 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5084 amdgpu_device_set_mp1_state(tmp_adev);
5087 * Try to put the audio codec into the suspend state
5088 * before the GPU reset starts.
5090 * Because the power domain of the graphics device is
5091 * shared with the AZ power domain, without this we
5092 * may change the audio hardware from behind the
5093 * audio driver's back and trigger audio codec
5094 * errors.
5096 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5097 audio_suspended = true;
5099 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5101 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5103 if (!amdgpu_sriov_vf(tmp_adev))
5104 amdgpu_amdkfd_pre_reset(tmp_adev);
5107 * Mark these ASICs as untracked before the reset
5108 * and add them back once the reset has completed
5110 amdgpu_unregister_gpu_instance(tmp_adev);
5112 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5114 /* disable ras on ALL IPs */
5115 if (!need_emergency_restart &&
5116 amdgpu_device_ip_need_full_reset(tmp_adev))
5117 amdgpu_ras_suspend(tmp_adev);
5119 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5120 struct amdgpu_ring *ring = tmp_adev->rings[i];
5122 if (!ring || !ring->sched.thread)
5123 continue;
5125 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5127 if (need_emergency_restart)
5128 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5130 atomic_inc(&tmp_adev->gpu_reset_counter);
5133 if (need_emergency_restart)
5134 goto skip_sched_resume;
5137 * Must check guilty signal here since after this point all old
5138 * HW fences are force signaled.
5140 * job->base holds a reference to parent fence
5142 if (job && job->base.s_fence->parent &&
5143 dma_fence_is_signaled(job->base.s_fence->parent)) {
5144 job_signaled = true;
5145 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5149 retry: /* Rest of adevs pre asic reset from XGMI hive. */
5150 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5151 r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
5152 /* TODO: Should we stop here? */
5153 if (r) {
5154 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err %d for drm dev %s ",
5155 r, adev_to_drm(tmp_adev)->unique);
5156 tmp_adev->asic_reset_res = r;
5160 tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
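/* Snapshot vram_lost_counter so we can tell below whether this reset
 * actually lost VRAM before re-running any guilty-job recheck. */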
5161 /* Actual ASIC resets if needed.*/
5162 /* Host driver will handle XGMI hive reset for SRIOV */
5163 if (amdgpu_sriov_vf(adev)) {
5164 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5165 if (r)
5166 adev->asic_reset_res = r;
5168 /* Aldebaran supports RAS in SRIOV, so we need to resume RAS during reset */
5169 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5170 amdgpu_ras_resume(adev);
5172 r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
5173 if (r && r == -EAGAIN)
5174 goto retry;
5179 /* Post ASIC reset for all devs. */
5180 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5183 * Sometimes a later bad compute job can block a good gfx job because the
5184 * gfx and compute rings share internal GC hardware. Add an extra
5185 * guilty-job recheck step to find the real culprit: it synchronously
5186 * resubmits and waits for the first job to be signaled, and if that
5187 * times out we identify it as the real guilty job.
5189 if (amdgpu_gpu_recovery == 2 &&
5190 !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5191 amdgpu_device_recheck_guilty_jobs(
5192 tmp_adev, device_list_handle, &reset_context);
5194 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5195 struct amdgpu_ring *ring = tmp_adev->rings[i];
5197 if (!ring || !ring->sched.thread)
5198 continue;
5200 /* No point in resubmitting jobs if we didn't do a HW reset */
5201 if (!tmp_adev->asic_reset_res && !job_signaled)
5202 drm_sched_resubmit_jobs(&ring->sched);
5204 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5207 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5208 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5211 if (tmp_adev->asic_reset_res)
5212 r = tmp_adev->asic_reset_res;
5214 tmp_adev->asic_reset_res = 0;
5217 /* bad news, how do we tell userspace? */
5218 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5219 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5221 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5222 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5223 DRM_WARN("smart shift update failed\n");
5228 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5229 /* unlock kfd: SRIOV would do it separately */
5230 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5231 amdgpu_amdkfd_post_reset(tmp_adev);
5233 /* kfd_post_reset will do nothing if the kfd device is not initialized;
5234 * bring up kfd here if it was not initialized before
5236 if (!adev->kfd.init_complete)
5237 amdgpu_amdkfd_device_init(adev);
5239 if (audio_suspended)
5240 amdgpu_device_resume_display_audio(tmp_adev);
5242 amdgpu_device_unset_mp1_state(tmp_adev);
5245 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5246 reset_list);
5247 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5249 if (hive) {
5250 mutex_unlock(&hive->hive_lock);
5251 amdgpu_put_xgmi_hive(hive);
5252 }
5254 if (r)
5255 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5256 return r;
5257 }
5259 struct amdgpu_recover_work_struct {
5260 struct work_struct base;
5261 struct amdgpu_device *adev;
5262 struct amdgpu_job *job;
5263 int ret;
5264 };
5266 static void amdgpu_device_queue_gpu_recover_work(struct work_struct *work)
5268 struct amdgpu_recover_work_struct *recover_work = container_of(work, struct amdgpu_recover_work_struct, base);
5270 recover_work->ret = amdgpu_device_gpu_recover_imp(recover_work->adev, recover_work->job);
5273 * Serialize GPU recovery into the reset domain's single-threaded workqueue
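*
* A caller such as a job timeout handler would use it roughly as follows
* (illustrative sketch, not a verbatim call site):
*
*	if (amdgpu_device_should_recover_gpu(ring->adev))
*		r = amdgpu_device_gpu_recover(ring->adev, job);
*
* flush_work() below makes the call synchronous, so concurrent recovery
* requests for the same reset domain are serialized on its ordered
* workqueue.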
5275 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5276 struct amdgpu_job *job)
5278 struct amdgpu_recover_work_struct work = {.adev = adev, .job = job};
5280 INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);
5282 if (!amdgpu_reset_domain_schedule(adev->reset_domain, &work.base))
5283 return -EAGAIN;
5285 flush_work(&work.base);
5287 return work.ret;
5291 * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
5293 * @adev: amdgpu_device pointer
5295 * Fetches and stores in the driver the PCIE capabilities (gen speed
5296 * and lanes) of the slot the device is in. Handles APUs and
5297 * virtualized environments where PCIE config space may not be available.
5299 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5301 struct pci_dev *pdev;
5302 enum pci_bus_speed speed_cap, platform_speed_cap;
5303 enum pcie_link_width platform_link_width;
5305 if (amdgpu_pcie_gen_cap)
5306 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5308 if (amdgpu_pcie_lane_cap)
5309 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5311 /* covers APUs as well */
5312 if (pci_is_root_bus(adev->pdev->bus)) {
5313 if (adev->pm.pcie_gen_mask == 0)
5314 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5315 if (adev->pm.pcie_mlw_mask == 0)
5316 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5317 return;
5318 }
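/* If both masks were already forced via the amdgpu_pcie_gen_cap and
 * amdgpu_pcie_lane_cap module parameters, there is nothing to probe. */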
5320 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5321 return;
5323 pcie_bandwidth_available(adev->pdev, NULL,
5324 &platform_speed_cap, &platform_link_width);
5326 if (adev->pm.pcie_gen_mask == 0) {
5329 speed_cap = pcie_get_speed_cap(pdev);
5330 if (speed_cap == PCI_SPEED_UNKNOWN) {
5331 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5332 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5333 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5334 } else {
5335 if (speed_cap == PCIE_SPEED_32_0GT)
5336 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5337 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5338 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5339 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5340 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5341 else if (speed_cap == PCIE_SPEED_16_0GT)
5342 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5343 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5344 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5345 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5346 else if (speed_cap == PCIE_SPEED_8_0GT)
5347 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5348 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5349 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5350 else if (speed_cap == PCIE_SPEED_5_0GT)
5351 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5352 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5353 else
5354 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5357 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5358 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5359 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5360 } else {
5361 if (platform_speed_cap == PCIE_SPEED_32_0GT)
5362 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5363 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5364 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5365 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5366 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5367 else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5368 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5369 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5370 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5371 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5372 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5373 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5374 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5375 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5376 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5377 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5378 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5379 else
5380 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
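/* The gen masks are cumulative: a link capable of, say, GEN4 also sets
 * the GEN1-GEN3 bits, so the mask lists every speed the link may
 * negotiate. */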
5384 if (adev->pm.pcie_mlw_mask == 0) {
5385 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5386 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5387 } else {
5388 switch (platform_link_width) {
5389 case PCIE_LNK_X32:
5390 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5391 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5392 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5393 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5394 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5395 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5396 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5397 break;
5398 case PCIE_LNK_X16:
5399 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5400 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5401 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5402 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5403 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5404 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5405 break;
5406 case PCIE_LNK_X12:
5407 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5408 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5409 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5410 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5411 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5412 break;
5413 case PCIE_LNK_X8:
5414 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5415 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5416 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5417 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5418 break;
5419 case PCIE_LNK_X4:
5420 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5421 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5422 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5423 break;
5424 case PCIE_LNK_X2:
5425 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5426 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5427 break;
5428 case PCIE_LNK_X1:
5429 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5430 break;
5431 default:
5432 break;
5433 }
5438 int amdgpu_device_baco_enter(struct drm_device *dev)
5440 struct amdgpu_device *adev = drm_to_adev(dev);
5441 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5443 if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5444 return -ENOTSUPP;
5446 if (ras && adev->ras_enabled &&
5447 adev->nbio.funcs->enable_doorbell_interrupt)
5448 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5450 return amdgpu_dpm_baco_enter(adev);
5453 int amdgpu_device_baco_exit(struct drm_device *dev)
5455 struct amdgpu_device *adev = drm_to_adev(dev);
5456 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5459 if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5460 return -ENOTSUPP;
5462 ret = amdgpu_dpm_baco_exit(adev);
5463 if (ret)
5464 return ret;
5466 if (ras && adev->ras_enabled &&
5467 adev->nbio.funcs->enable_doorbell_interrupt)
5468 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
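/* Under passthrough, stale doorbell interrupts may remain latched across
 * BACO; clear them if the NBIO block provides a handler for that. */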
5470 if (amdgpu_passthrough(adev) &&
5471 adev->nbio.funcs->clear_doorbell_interrupt)
5472 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5474 return 0;
5475 }
5478 * amdgpu_pci_error_detected - Called when a PCI error is detected.
5479 * @pdev: PCI device struct
5480 * @state: PCI channel state
5482 * Description: Called when a PCI error is detected.
5484 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5486 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5488 struct drm_device *dev = pci_get_drvdata(pdev);
5489 struct amdgpu_device *adev = drm_to_adev(dev);
5492 DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5494 if (adev->gmc.xgmi.num_physical_nodes > 1) {
5495 DRM_WARN("No support for XGMI hive yet...");
5496 return PCI_ERS_RESULT_DISCONNECT;
5499 adev->pci_channel_state = state;
5501 switch (state) {
5502 case pci_channel_io_normal:
5503 return PCI_ERS_RESULT_CAN_RECOVER;
5504 /* Fatal error, prepare for slot reset */
5505 case pci_channel_io_frozen:
5507 * Locking adev->reset_domain->sem will prevent any external access
5508 * to GPU during PCI error recovery
5510 amdgpu_device_lock_reset_domain(adev->reset_domain);
5511 amdgpu_device_set_mp1_state(adev);
5514 * Block any work scheduling as we do for regular GPU reset
5515 * for the duration of the recovery
5517 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5518 struct amdgpu_ring *ring = adev->rings[i];
5520 if (!ring || !ring->sched.thread)
5521 continue;
5523 drm_sched_stop(&ring->sched, NULL);
5525 atomic_inc(&adev->gpu_reset_counter);
5526 return PCI_ERS_RESULT_NEED_RESET;
5527 case pci_channel_io_perm_failure:
5528 /* Permanent error, prepare for device removal */
5529 return PCI_ERS_RESULT_DISCONNECT;
5532 return PCI_ERS_RESULT_NEED_RESET;
5536 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5537 * @pdev: pointer to PCI device
5539 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5542 DRM_INFO("PCI error: mmio enabled callback!!\n");
5544 /* TODO - dump whatever for debugging purposes */
5546 /* This is called only if amdgpu_pci_error_detected returns
5547 * PCI_ERS_RESULT_CAN_RECOVER. Reads/writes to the device still
5548 * work, so there is no need to reset the slot.
5551 return PCI_ERS_RESULT_RECOVERED;
5555 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5556 * @pdev: PCI device struct
5558 * Description: This routine is called by the pci error recovery
5559 * code after the PCI slot has been reset, just before we
5560 * should resume normal operations.
5562 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5564 struct drm_device *dev = pci_get_drvdata(pdev);
5565 struct amdgpu_device *adev = drm_to_adev(dev);
5567 struct amdgpu_reset_context reset_context;
5569 struct list_head device_list;
5571 DRM_INFO("PCI error: slot reset callback!!\n");
5573 memset(&reset_context, 0, sizeof(reset_context));
5575 INIT_LIST_HEAD(&device_list);
5576 list_add_tail(&adev->reset_list, &device_list);
5578 /* wait for asic to come out of reset */
5579 msleep(500);
5581 /* Restore PCI config space */
5582 amdgpu_device_load_pci_state(pdev);
5584 /* confirm ASIC came out of reset */
5585 for (i = 0; i < adev->usec_timeout; i++) {
5586 memsize = amdgpu_asic_get_config_memsize(adev);
5588 if (memsize != 0xffffffff)
5589 break;
5590 udelay(1);
5591 }
5592 if (memsize == 0xffffffff) {
5593 r = -ETIME;
5594 goto out;
5595 }
5597 reset_context.method = AMD_RESET_METHOD_NONE;
5598 reset_context.reset_req_dev = adev;
5599 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5600 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5602 adev->no_hw_access = true;
5603 r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5604 adev->no_hw_access = false;
5608 r = amdgpu_do_asic_reset(&device_list, &reset_context);
5610 out:
5611 if (!r) {
5612 if (amdgpu_device_cache_pci_state(adev->pdev))
5613 pci_restore_state(adev->pdev);
5615 DRM_INFO("PCIe error recovery succeeded\n");
5617 DRM_ERROR("PCIe error recovery failed, err:%d\n", r);
5618 amdgpu_device_unset_mp1_state(adev);
5619 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5622 return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5626 * amdgpu_pci_resume() - resume normal ops after PCI reset
5627 * @pdev: pointer to PCI device
5629 * Called when the error recovery driver tells us that its
5630 * OK to resume normal operation.
5632 void amdgpu_pci_resume(struct pci_dev *pdev)
5634 struct drm_device *dev = pci_get_drvdata(pdev);
5635 struct amdgpu_device *adev = drm_to_adev(dev);
5639 DRM_INFO("PCI error: resume callback!!\n");
5641 /* Only continue execution for the case of pci_channel_io_frozen */
5642 if (adev->pci_channel_state != pci_channel_io_frozen)
5643 return;
5645 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5646 struct amdgpu_ring *ring = adev->rings[i];
5648 if (!ring || !ring->sched.thread)
5649 continue;
5652 drm_sched_resubmit_jobs(&ring->sched);
5653 drm_sched_start(&ring->sched, true);
5656 amdgpu_device_unset_mp1_state(adev);
5657 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5660 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5662 struct drm_device *dev = pci_get_drvdata(pdev);
5663 struct amdgpu_device *adev = drm_to_adev(dev);
5666 r = pci_save_state(pdev);
5667 if (!r) {
5668 kfree(adev->pci_state);
5670 adev->pci_state = pci_store_saved_state(pdev);
5672 if (!adev->pci_state) {
5673 DRM_ERROR("Failed to store PCI saved state");
5677 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5684 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5686 struct drm_device *dev = pci_get_drvdata(pdev);
5687 struct amdgpu_device *adev = drm_to_adev(dev);
5690 if (!adev->pci_state)
5691 return false;
5693 r = pci_load_saved_state(pdev, adev->pci_state);
5695 if (!r) {
5696 pci_restore_state(pdev);
5698 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5705 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5706 struct amdgpu_ring *ring)
5708 #ifdef CONFIG_X86_64
5709 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5710 return;
5711 #endif
5712 if (adev->gmc.xgmi.connected_to_cpu)
5713 return;
5715 if (ring && ring->funcs->emit_hdp_flush)
5716 amdgpu_ring_emit_hdp_flush(ring);
5717 else
5718 amdgpu_asic_flush_hdp(adev, ring);
5721 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5722 struct amdgpu_ring *ring)
5724 #ifdef CONFIG_X86_64
5725 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5726 return;
5727 #endif
5728 if (adev->gmc.xgmi.connected_to_cpu)
5729 return;
5731 amdgpu_asic_invalidate_hdp(adev, ring);
5734 int amdgpu_in_reset(struct amdgpu_device *adev)
5736 return atomic_read(&adev->reset_domain->in_gpu_reset);
5740 * amdgpu_device_halt() - bring hardware to some kind of halt state
5742 * @adev: amdgpu_device pointer
5744 * Bring hardware to some kind of halt state so that no one can touch it
5745 * any more. This helps preserve the error context when an error occurs.
5746 * Compared to a simple hang, the system will stay stable at least for SSH
5747 * access. Then it should be trivial to inspect the hardware state and
5748 * see what's going on. Implemented as follows:
5750 * 1. drm_dev_unplug() makes device inaccessible to user space(IOCTLs, etc),
5751 * clears all CPU mappings to device, disallows remappings through page faults
5752 * 2. amdgpu_irq_disable_all() disables all interrupts
5753 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5754 * 4. set adev->no_hw_access to avoid potential crashes after step 5
5755 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5756 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5757 * flush any in flight DMA operations
5759 void amdgpu_device_halt(struct amdgpu_device *adev)
5761 struct pci_dev *pdev = adev->pdev;
5762 struct drm_device *ddev = adev_to_drm(adev);
5764 drm_dev_unplug(ddev);
5766 amdgpu_irq_disable_all(adev);
5768 amdgpu_fence_driver_hw_fini(adev);
5770 adev->no_hw_access = true;
5772 amdgpu_device_unmap_mmio(adev);
5774 pci_disable_device(pdev);
5775 pci_wait_for_pending_transaction(pdev);
5778 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5779 u32 reg)
5780 {
5781 unsigned long flags, address, data;
5782 u32 r;
5784 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5785 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5787 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5788 WREG32(address, reg * 4);
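/* Classic index/data pair: write the register index, then do a posting
 * read so the index write lands before the data register is accessed.
 * amdgpu_device_pcie_port_wreg() below follows the same pattern. */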
5789 (void)RREG32(address);
5790 r = RREG32(data);
5791 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5793 return r;
5794 }
5795 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5796 u32 reg, u32 v)
5797 {
5798 unsigned long flags, address, data;
5800 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5801 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5803 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5804 WREG32(address, reg * 4);
5805 (void)RREG32(address);
5806 WREG32(data, v);
5807 (void)RREG32(data);
5808 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5809 }