2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_probe_helper.h>
36 #include <drm/amdgpu_drm.h>
37 #include <linux/vgaarb.h>
38 #include <linux/vga_switcheroo.h>
39 #include <linux/efi.h>
41 #include "amdgpu_trace.h"
42 #include "amdgpu_i2c.h"
44 #include "amdgpu_atombios.h"
45 #include "amdgpu_atomfirmware.h"
47 #ifdef CONFIG_DRM_AMDGPU_SI
50 #ifdef CONFIG_DRM_AMDGPU_CIK
56 #include "bif/bif_4_1_d.h"
57 #include <linux/pci.h>
58 #include <linux/firmware.h>
59 #include "amdgpu_vf_error.h"
61 #include "amdgpu_amdkfd.h"
62 #include "amdgpu_pm.h"
64 #include "amdgpu_xgmi.h"
65 #include "amdgpu_ras.h"
66 #include "amdgpu_pmu.h"
67 #include "amdgpu_fru_eeprom.h"
68 #include "amdgpu_reset.h"
70 #include <linux/suspend.h>
71 #include <drm/task_barrier.h>
72 #include <linux/pm_runtime.h>
74 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
75 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
76 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
77 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
86 #define AMDGPU_RESUME_MS 2000
88 const char *amdgpu_asic_name[] = {
127 * DOC: pcie_replay_count
129 * The amdgpu driver provides a sysfs API for reporting the total number
130 * of PCIe replays (NAKs).
131 * The file pcie_replay_count is used for this and returns the total
132 * number of replays as the sum of the NAKs generated and the NAKs received.
135 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
136 struct device_attribute *attr, char *buf)
138 struct drm_device *ddev = dev_get_drvdata(dev);
139 struct amdgpu_device *adev = drm_to_adev(ddev);
140 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
142 return sysfs_emit(buf, "%llu\n", cnt);
145 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
146 amdgpu_device_get_pcie_replay_count, NULL);
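/*
 * Illustrative sketch (not part of the driver): userspace can read this
 * attribute like any other sysfs file. The card0 path below is an
 * assumption; the real path depends on the enumerated DRM device.
 *
 *	FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");
 *	unsigned long long replays;
 *
 *	if (f && fscanf(f, "%llu", &replays) == 1)
 *		printf("PCIe replays: %llu\n", replays);
 */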
148 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
152 * DOC: product_name
153 * The amdgpu driver provides a sysfs API for reporting the product name
155 * The file product_name is used for this and returns the product name
156 * as returned from the FRU.
157 * NOTE: This is only available for certain server cards
160 static ssize_t amdgpu_device_get_product_name(struct device *dev,
161 struct device_attribute *attr, char *buf)
163 struct drm_device *ddev = dev_get_drvdata(dev);
164 struct amdgpu_device *adev = drm_to_adev(ddev);
166 return sysfs_emit(buf, "%s\n", adev->product_name);
169 static DEVICE_ATTR(product_name, S_IRUGO,
170 amdgpu_device_get_product_name, NULL);
173 * DOC: product_number
175 * The amdgpu driver provides a sysfs API for reporting the part number
177 * The file product_number is used for this and returns the part number
178 * as returned from the FRU.
179 * NOTE: This is only available for certain server cards
182 static ssize_t amdgpu_device_get_product_number(struct device *dev,
183 struct device_attribute *attr, char *buf)
185 struct drm_device *ddev = dev_get_drvdata(dev);
186 struct amdgpu_device *adev = drm_to_adev(ddev);
188 return sysfs_emit(buf, "%s\n", adev->product_number);
191 static DEVICE_ATTR(product_number, S_IRUGO,
192 amdgpu_device_get_product_number, NULL);
195 * DOC: serial_number
197 * The amdgpu driver provides a sysfs API for reporting the serial number
199 * The file serial_number is used for this and returns the serial number
200 * as returned from the FRU.
201 * NOTE: This is only available for certain server cards
204 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
205 struct device_attribute *attr, char *buf)
207 struct drm_device *ddev = dev_get_drvdata(dev);
208 struct amdgpu_device *adev = drm_to_adev(ddev);
210 return sysfs_emit(buf, "%s\n", adev->serial);
213 static DEVICE_ATTR(serial_number, S_IRUGO,
214 amdgpu_device_get_serial_number, NULL);
217 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
219 * @dev: drm_device pointer
221 * Returns true if the device is a dGPU with ATPX power control,
222 * otherwise returns false.
224 bool amdgpu_device_supports_px(struct drm_device *dev)
226 struct amdgpu_device *adev = drm_to_adev(dev);
228 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
234 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
236 * @dev: drm_device pointer
238 * Returns true if the device is a dGPU with ACPI power control,
239 * otherwise returns false.
241 bool amdgpu_device_supports_boco(struct drm_device *dev)
243 struct amdgpu_device *adev = drm_to_adev(dev);
246 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
252 * amdgpu_device_supports_baco - Does the device support BACO
254 * @dev: drm_device pointer
256 * Returns true if the device supports BACO,
257 * otherwise returns false.
259 bool amdgpu_device_supports_baco(struct drm_device *dev)
261 struct amdgpu_device *adev = drm_to_adev(dev);
263 return amdgpu_asic_supports_baco(adev);
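/*
 * Illustrative sketch (not part of the driver): callers probe the runtime
 * power capabilities in order of preference. The helper names are real;
 * the local enum and policy below are assumptions, given a drm_device *ddev.
 *
 *	enum { PM_NONE, PM_PX, PM_BOCO, PM_BACO } pm_mode = PM_NONE;
 *
 *	if (amdgpu_device_supports_px(ddev))
 *		pm_mode = PM_PX;	// power controlled via ACPI ATPX methods
 *	else if (amdgpu_device_supports_boco(ddev))
 *		pm_mode = PM_BOCO;	// power controlled via ACPI power resources
 *	else if (amdgpu_device_supports_baco(ddev))
 *		pm_mode = PM_BACO;	// bus stays active while the chip powers off
 */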
267 * VRAM access helper functions
271 * amdgpu_device_vram_access - read/write a buffer in vram
273 * @adev: amdgpu_device pointer
274 * @pos: offset of the buffer in vram
275 * @buf: virtual address of the buffer in system memory
276 * @size: read/write size in bytes; the buffer at @buf must be at least @size bytes
277 * @write: true - write to vram, otherwise - read from vram
279 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
280 uint32_t *buf, size_t size, bool write)
288 last = min(pos + size, adev->gmc.visible_vram_size);
290 void __iomem *addr = adev->mman.aper_base_kaddr + pos;
291 size_t count = last - pos;
294 memcpy_toio(addr, buf, count);
296 amdgpu_asic_flush_hdp(adev, NULL);
298 amdgpu_asic_invalidate_hdp(adev, NULL);
300 memcpy_fromio(buf, addr, count);
312 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
313 for (last = pos + size; pos < last; pos += 4) {
314 uint32_t tmp = pos >> 31;
316 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
318 WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
322 WREG32_NO_KIQ(mmMM_DATA, *buf++);
324 *buf++ = RREG32_NO_KIQ(mmMM_DATA);
326 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
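/*
 * Illustrative sketch (not part of the driver): reading a few dwords of
 * VRAM through this helper. The offset is an arbitrary example value.
 *
 *	uint32_t data[4];
 *
 *	amdgpu_device_vram_access(adev, 0x1000, data, sizeof(data), false);
 *	// data[0..3] now hold the dwords at VRAM offset 0x1000
 */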
330 * register access helper functions.
333 /* Check if hw access should be skipped because of hotplug or device error */
334 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
336 if (adev->in_pci_err_recovery)
339 #ifdef CONFIG_LOCKDEP
341 * This is a bit complicated to understand, so worth a comment. What we assert
342 * here is that the GPU reset is not running on another thread in parallel.
344 * For this we trylock the read side of the reset semaphore, if that succeeds
345 * we know that the reset is not running in parallel.
347 * If the trylock fails we assert that we are either already holding the read
348 * side of the lock or are the reset thread itself and hold the write side of
349 * the lock.
352 if (down_read_trylock(&adev->reset_sem))
353 up_read(&adev->reset_sem);
355 lockdep_assert_held(&adev->reset_sem);
362 * amdgpu_device_rreg - read a memory mapped IO or indirect register
364 * @adev: amdgpu_device pointer
365 * @reg: dword aligned register offset
366 * @acc_flags: access flags which require special behavior
368 * Returns the 32 bit value from the offset specified.
370 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
371 uint32_t reg, uint32_t acc_flags)
375 if (amdgpu_device_skip_hw_access(adev))
378 if ((reg * 4) < adev->rmmio_size) {
379 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
380 amdgpu_sriov_runtime(adev) &&
381 down_read_trylock(&adev->reset_sem)) {
382 ret = amdgpu_kiq_rreg(adev, reg);
383 up_read(&adev->reset_sem);
385 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
388 ret = adev->pcie_rreg(adev, reg * 4);
391 trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
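/*
 * Illustrative sketch (not part of the driver): a direct read through this
 * helper, bypassing the KIQ path. Most code uses the RREG32* wrapper macros
 * instead; reg_offset below stands in for a dword aligned register offset.
 *
 *	u32 val = amdgpu_device_rreg(adev, reg_offset, AMDGPU_REGS_NO_KIQ);
 */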
397 * MMIO register read with byte offset helper function
398 * @offset: byte offset from MMIO start
403 * amdgpu_mm_rreg8 - read a memory mapped IO register
405 * @adev: amdgpu_device pointer
406 * @offset: byte aligned register offset
408 * Returns the 8 bit value from the offset specified.
410 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
412 if (amdgpu_device_skip_hw_access(adev))
415 if (offset < adev->rmmio_size)
416 return (readb(adev->rmmio + offset));
421 * MMIO register write with byte offset helper function
422 * @offset: byte offset from MMIO start
423 * @value: the value to be written to the register
427 * amdgpu_mm_wreg8 - write a memory mapped IO register
429 * @adev: amdgpu_device pointer
430 * @offset: byte aligned register offset
431 * @value: 8 bit value to write
433 * Writes the value specified to the offset specified.
435 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
437 if (amdgpu_device_skip_hw_access(adev))
440 if (offset < adev->rmmio_size)
441 writeb(value, adev->rmmio + offset);
447 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
449 * @adev: amdgpu_device pointer
450 * @reg: dword aligned register offset
451 * @v: 32 bit value to write to the register
452 * @acc_flags: access flags which require special behavior
454 * Writes the value specified to the offset specified.
456 void amdgpu_device_wreg(struct amdgpu_device *adev,
457 uint32_t reg, uint32_t v,
460 if (amdgpu_device_skip_hw_access(adev))
463 if ((reg * 4) < adev->rmmio_size) {
464 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
465 amdgpu_sriov_runtime(adev) &&
466 down_read_trylock(&adev->reset_sem)) {
467 amdgpu_kiq_wreg(adev, reg, v);
468 up_read(&adev->reset_sem);
470 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
473 adev->pcie_wreg(adev, reg * 4, v);
476 trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
480 * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
482 * this function is invoked only for the debugfs register access path
484 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
485 uint32_t reg, uint32_t v)
487 if (amdgpu_device_skip_hw_access(adev))
490 if (amdgpu_sriov_fullaccess(adev) &&
491 adev->gfx.rlc.funcs &&
492 adev->gfx.rlc.funcs->is_rlcg_access_range) {
493 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
494 return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0, 0);
496 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
501 * amdgpu_mm_rdoorbell - read a doorbell dword
503 * @adev: amdgpu_device pointer
504 * @index: doorbell index
506 * Returns the value in the doorbell aperture at the
507 * requested doorbell index (CIK).
509 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
511 if (amdgpu_device_skip_hw_access(adev))
514 if (index < adev->doorbell.num_doorbells) {
515 return readl(adev->doorbell.ptr + index);
517 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
523 * amdgpu_mm_wdoorbell - write a doorbell dword
525 * @adev: amdgpu_device pointer
526 * @index: doorbell index
529 * Writes @v to the doorbell aperture at the
530 * requested doorbell index (CIK).
532 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
534 if (amdgpu_device_skip_hw_access(adev))
537 if (index < adev->doorbell.num_doorbells) {
538 writel(v, adev->doorbell.ptr + index);
540 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
545 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
547 * @adev: amdgpu_device pointer
548 * @index: doorbell index
550 * Returns the value in the doorbell aperture at the
551 * requested doorbell index (VEGA10+).
553 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
555 if (amdgpu_device_skip_hw_access(adev))
558 if (index < adev->doorbell.num_doorbells) {
559 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
561 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
567 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
569 * @adev: amdgpu_device pointer
570 * @index: doorbell index
573 * Writes @v to the doorbell aperture at the
574 * requested doorbell index (VEGA10+).
576 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
578 if (amdgpu_device_skip_hw_access(adev))
581 if (index < adev->doorbell.num_doorbells) {
582 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
584 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
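/*
 * Illustrative sketch (not part of the driver): ring code typically kicks
 * the hardware by writing the ring's write pointer to its doorbell. The
 * shift is ring specific and shown only as an example.
 *
 *	amdgpu_mm_wdoorbell64(adev, ring->doorbell_index, ring->wptr << 2);
 */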
589 * amdgpu_device_indirect_rreg - read an indirect register
591 * @adev: amdgpu_device pointer
592 * @pcie_index: mmio register offset
593 * @pcie_data: mmio register offset
594 * @reg_addr: indirect register address to read from
596 * Returns the value of indirect register @reg_addr
598 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
599 u32 pcie_index, u32 pcie_data,
604 void __iomem *pcie_index_offset;
605 void __iomem *pcie_data_offset;
607 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
608 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
609 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
611 writel(reg_addr, pcie_index_offset);
612 readl(pcie_index_offset);
613 r = readl(pcie_data_offset);
614 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
620 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
622 * @adev: amdgpu_device pointer
623 * @pcie_index: mmio register offset
624 * @pcie_data: mmio register offset
625 * @reg_addr: indirect register address to read from
627 * Returns the value of indirect register @reg_addr
629 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
630 u32 pcie_index, u32 pcie_data,
635 void __iomem *pcie_index_offset;
636 void __iomem *pcie_data_offset;
638 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
639 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
640 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
642 /* read low 32 bits */
643 writel(reg_addr, pcie_index_offset);
644 readl(pcie_index_offset);
645 r = readl(pcie_data_offset);
646 /* read high 32 bits */
647 writel(reg_addr + 4, pcie_index_offset);
648 readl(pcie_index_offset);
649 r |= ((u64)readl(pcie_data_offset) << 32);
650 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
656 * amdgpu_device_indirect_wreg - write an indirect register
658 * @adev: amdgpu_device pointer
659 * @pcie_index: mmio register offset
660 * @pcie_data: mmio register offset
661 * @reg_addr: indirect register offset
662 * @reg_data: indirect register data
665 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
666 u32 pcie_index, u32 pcie_data,
667 u32 reg_addr, u32 reg_data)
670 void __iomem *pcie_index_offset;
671 void __iomem *pcie_data_offset;
673 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
674 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
675 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
677 writel(reg_addr, pcie_index_offset);
678 readl(pcie_index_offset);
679 writel(reg_data, pcie_data_offset);
680 readl(pcie_data_offset);
681 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
685 * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
687 * @adev: amdgpu_device pointer
688 * @pcie_index: mmio register offset
689 * @pcie_data: mmio register offset
690 * @reg_addr: indirect register offset
691 * @reg_data: indirect register data
694 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
695 u32 pcie_index, u32 pcie_data,
696 u32 reg_addr, u64 reg_data)
699 void __iomem *pcie_index_offset;
700 void __iomem *pcie_data_offset;
702 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
703 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
704 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
706 /* write low 32 bits */
707 writel(reg_addr, pcie_index_offset);
708 readl(pcie_index_offset);
709 writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
710 readl(pcie_data_offset);
711 /* write high 32 bits */
712 writel(reg_addr + 4, pcie_index_offset);
713 readl(pcie_index_offset);
714 writel((u32)(reg_data >> 32), pcie_data_offset);
715 readl(pcie_data_offset);
716 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
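/*
 * Illustrative sketch (not part of the driver): ASIC code wraps these
 * indirect helpers with the index/data register pair of its PCIE aperture.
 * The offsets below are placeholders, not real register addresses.
 *
 *	static u64 example_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
 *	{
 *		u32 pcie_index = 0x38;	// placeholder index register offset
 *		u32 pcie_data = 0x3c;	// placeholder data register offset
 *
 *		return amdgpu_device_indirect_rreg64(adev, pcie_index,
 *						     pcie_data, reg);
 *	}
 */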
720 * amdgpu_invalid_rreg - dummy reg read function
722 * @adev: amdgpu_device pointer
723 * @reg: offset of register
725 * Dummy register read function. Used for register blocks
726 * that certain asics don't have (all asics).
727 * Returns the value in the register.
729 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
731 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
737 * amdgpu_invalid_wreg - dummy reg write function
739 * @adev: amdgpu_device pointer
740 * @reg: offset of register
741 * @v: value to write to the register
743 * Dummy register write function. Used for register blocks
744 * that certain asics don't have (all asics).
746 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
748 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
754 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
756 * @adev: amdgpu_device pointer
757 * @reg: offset of register
759 * Dummy register read function. Used for register blocks
760 * that certain asics don't have (all asics).
761 * Returns the value in the register.
763 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
765 DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
771 * amdgpu_invalid_wreg64 - dummy reg write function
773 * @adev: amdgpu_device pointer
774 * @reg: offset of register
775 * @v: value to write to the register
777 * Dummy register write function. Used for register blocks
778 * that certain asics don't have (all asics).
780 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
782 DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
788 * amdgpu_block_invalid_rreg - dummy reg read function
790 * @adev: amdgpu_device pointer
791 * @block: offset of instance
792 * @reg: offset of register
794 * Dummy register read function. Used for register blocks
795 * that certain asics don't have (all asics).
796 * Returns the value in the register.
798 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
799 uint32_t block, uint32_t reg)
801 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
808 * amdgpu_block_invalid_wreg - dummy reg write function
810 * @adev: amdgpu_device pointer
811 * @block: offset of instance
812 * @reg: offset of register
813 * @v: value to write to the register
815 * Dummy register write function. Used for register blocks
816 * that certain asics don't have (all asics).
818 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
820 uint32_t reg, uint32_t v)
822 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
828 * amdgpu_device_asic_init - Wrapper for atom asic_init
830 * @adev: amdgpu_device pointer
832 * Does any asic specific work and then calls atom asic init.
834 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
836 amdgpu_asic_pre_asic_init(adev);
838 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
842 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
844 * @adev: amdgpu_device pointer
846 * Allocates a scratch page of VRAM for use by various things in the
847 * driver.
849 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
851 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
852 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
853 &adev->vram_scratch.robj,
854 &adev->vram_scratch.gpu_addr,
855 (void **)&adev->vram_scratch.ptr);
859 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
861 * @adev: amdgpu_device pointer
863 * Frees the VRAM scratch page.
865 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
867 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
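/*
 * Illustrative sketch (not part of the driver): the create/free pair used
 * above is the general pattern for small kernel-owned buffer objects. The
 * variables below are placeholders.
 *
 *	struct amdgpu_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *
 *	if (!amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				     AMDGPU_GEM_DOMAIN_VRAM,
 *				     &bo, &gpu_addr, &cpu_ptr))
 *		amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */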
871 * amdgpu_device_program_register_sequence - program an array of registers.
873 * @adev: amdgpu_device pointer
874 * @registers: pointer to the register array
875 * @array_size: size of the register array
877 * Programs an array of registers with AND and OR masks.
878 * This is a helper for setting golden registers.
880 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
881 const u32 *registers,
882 const u32 array_size)
884 u32 tmp, reg, and_mask, or_mask;
890 for (i = 0; i < array_size; i += 3) {
891 reg = registers[i + 0];
892 and_mask = registers[i + 1];
893 or_mask = registers[i + 2];
895 if (and_mask == 0xffffffff) {
900 if (adev->family >= AMDGPU_FAMILY_AI)
901 tmp |= (or_mask & and_mask);
910 * amdgpu_device_pci_config_reset - reset the GPU
912 * @adev: amdgpu_device pointer
914 * Resets the GPU using the pci config reset sequence.
915 * Only applicable to asics prior to vega10.
917 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
919 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
923 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
925 * @adev: amdgpu_device pointer
927 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
929 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
931 return pci_reset_function(adev->pdev);
935 * GPU doorbell aperture helpers function.
938 * amdgpu_device_doorbell_init - Init doorbell driver information.
940 * @adev: amdgpu_device pointer
942 * Init doorbell driver information (CIK)
943 * Returns 0 on success, error on failure.
945 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
948 /* No doorbell on SI hardware generation */
949 if (adev->asic_type < CHIP_BONAIRE) {
950 adev->doorbell.base = 0;
951 adev->doorbell.size = 0;
952 adev->doorbell.num_doorbells = 0;
953 adev->doorbell.ptr = NULL;
957 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
960 amdgpu_asic_init_doorbell_index(adev);
962 /* doorbell bar mapping */
963 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
964 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
966 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
967 adev->doorbell_index.max_assignment+1);
968 if (adev->doorbell.num_doorbells == 0)
971 /* For Vega, reserve and map two pages on doorbell BAR since SDMA
972 * paging queue doorbell use the second page. The
973 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
974 * doorbells are in the first page. So with paging queue enabled,
975 * the max num_doorbells should be extended by 1 page (0x400 in dwords)
977 if (adev->asic_type >= CHIP_VEGA10)
978 adev->doorbell.num_doorbells += 0x400;
980 adev->doorbell.ptr = ioremap(adev->doorbell.base,
981 adev->doorbell.num_doorbells *
983 if (adev->doorbell.ptr == NULL)
990 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
992 * @adev: amdgpu_device pointer
994 * Tear down doorbell driver information (CIK)
996 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
998 iounmap(adev->doorbell.ptr);
999 adev->doorbell.ptr = NULL;
1005 * amdgpu_device_wb_*()
1006 * Writeback is the method by which the GPU updates special pages in memory
1007 * with the status of certain GPU events (fences, ring pointers, etc.).
1011 * amdgpu_device_wb_fini - Disable Writeback and free memory
1013 * @adev: amdgpu_device pointer
1015 * Disables Writeback and frees the Writeback memory (all asics).
1016 * Used at driver shutdown.
1018 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1020 if (adev->wb.wb_obj) {
1021 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1023 (void **)&adev->wb.wb);
1024 adev->wb.wb_obj = NULL;
1029 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
1031 * @adev: amdgpu_device pointer
1033 * Initializes writeback and allocates writeback memory (all asics).
1034 * Used at driver startup.
1035 * Returns 0 on success or a negative error code on failure.
1037 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1041 if (adev->wb.wb_obj == NULL) {
1042 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1043 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1044 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1045 &adev->wb.wb_obj, &adev->wb.gpu_addr,
1046 (void **)&adev->wb.wb);
1048 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1052 adev->wb.num_wb = AMDGPU_MAX_WB;
1053 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1055 /* clear wb memory */
1056 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1063 * amdgpu_device_wb_get - Allocate a wb entry
1065 * @adev: amdgpu_device pointer
1068 * Allocate a wb slot for use by the driver (all asics).
1069 * Returns 0 on success or -EINVAL on failure.
1071 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1073 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1075 if (offset < adev->wb.num_wb) {
1076 __set_bit(offset, adev->wb.used);
1077 *wb = offset << 3; /* convert to dw offset */
1085 * amdgpu_device_wb_free - Free a wb entry
1087 * @adev: amdgpu_device pointer
1090 * Free a wb slot allocated for use by the driver (all asics)
1092 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1095 if (wb < adev->wb.num_wb)
1096 __clear_bit(wb, adev->wb.used);
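/*
 * Illustrative sketch (not part of the driver): a typical writeback slot
 * lifecycle. The slot index returned by amdgpu_device_wb_get() is a dword
 * offset into the wb buffer.
 *
 *	u32 offs;
 *
 *	if (!amdgpu_device_wb_get(adev, &offs)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + offs * 4;
 *		volatile u32 *cpu_ptr = &adev->wb.wb[offs];
 *
 *		// hardware writes to gpu_addr; the driver polls *cpu_ptr
 *		amdgpu_device_wb_free(adev, offs);
 *	}
 */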
1100 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1102 * @adev: amdgpu_device pointer
1104 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1105 * to fail, but if any of the BARs is not accessible after the resize we abort
1106 * driver loading by returning -ENODEV.
1108 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1110 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1111 struct pci_bus *root;
1112 struct resource *res;
1118 if (amdgpu_sriov_vf(adev))
1121 /* skip if the bios has already enabled large BAR */
1122 if (adev->gmc.real_vram_size &&
1123 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1126 /* Check if the root BUS has 64bit memory resources */
1127 root = adev->pdev->bus;
1128 while (root->parent)
1129 root = root->parent;
1131 pci_bus_for_each_resource(root, res, i) {
1132 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1133 res->start > 0x100000000ull)
1137 /* Trying to resize is pointless without a root hub window above 4GB */
1141 /* Limit the BAR size to what is available */
1142 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1145 /* Disable memory decoding while we change the BAR addresses and size */
1146 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1147 pci_write_config_word(adev->pdev, PCI_COMMAND,
1148 cmd & ~PCI_COMMAND_MEMORY);
1150 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1151 amdgpu_device_doorbell_fini(adev);
1152 if (adev->asic_type >= CHIP_BONAIRE)
1153 pci_release_resource(adev->pdev, 2);
1155 pci_release_resource(adev->pdev, 0);
1157 r = pci_resize_resource(adev->pdev, 0, rbar_size);
1159 DRM_INFO("Not enough PCI address space for a large BAR.");
1160 else if (r && r != -ENOTSUPP)
1161 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1163 pci_assign_unassigned_bus_resources(adev->pdev->bus);
1165 /* When the doorbell or fb BAR isn't available we have no chance of
1168 r = amdgpu_device_doorbell_init(adev);
1169 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1172 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1178 * GPU helpers function.
1181 * amdgpu_device_need_post - check if the hw needs post or not
1183 * @adev: amdgpu_device pointer
1185 * Check if the asic has been initialized (all asics) at driver startup
1186 * or post is needed if hw reset is performed.
1187 * Returns true if post is needed, false if not.
1189 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1193 if (amdgpu_sriov_vf(adev))
1196 if (amdgpu_passthrough(adev)) {
1197 /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
1198 * some old SMC firmware still needs the driver to do vPost, otherwise the
1199 * GPU hangs. SMC firmware versions above 22.15 don't have this flaw, so we
1200 * force vPost for SMC versions below 22.15
1202 if (adev->asic_type == CHIP_FIJI) {
1205 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1206 /* force vPost if error occurred */
1210 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1211 if (fw_ver < 0x00160e00)
1216 /* Don't post if we need to reset whole hive on init */
1217 if (adev->gmc.xgmi.pending_reset)
1220 if (adev->has_hw_reset) {
1221 adev->has_hw_reset = false;
1225 /* bios scratch used on CIK+ */
1226 if (adev->asic_type >= CHIP_BONAIRE)
1227 return amdgpu_atombios_scratch_need_asic_init(adev);
1229 /* check MEM_SIZE for older asics */
1230 reg = amdgpu_asic_get_config_memsize(adev);
1232 if ((reg != 0) && (reg != 0xffffffff))
1238 /* if we get transitioned to only one device, take VGA back */
1240 * amdgpu_device_vga_set_decode - enable/disable vga decode
1242 * @cookie: amdgpu_device pointer
1243 * @state: enable/disable vga decode
1245 * Enable/disable vga decode (all asics).
1246 * Returns VGA resource flags.
1248 static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
1250 struct amdgpu_device *adev = cookie;
1251 amdgpu_asic_set_vga_state(adev, state);
1253 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1254 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1256 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1260 * amdgpu_device_check_block_size - validate the vm block size
1262 * @adev: amdgpu_device pointer
1264 * Validates the vm block size specified via module parameter.
1265 * The vm block size defines the number of bits in the page table versus the
1266 * page directory; a page is 4KB so we have a 12 bit offset, a minimum of 9
1267 * bits in the page table, and the remaining bits in the page directory.
1269 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1271 /* defines the number of bits in the page table versus the page directory,
1272 * a page is 4KB so we have a 12 bit offset, a minimum of 9 bits in the
1273 * page table and the remaining bits in the page directory */
1274 if (amdgpu_vm_block_size == -1)
1277 if (amdgpu_vm_block_size < 9) {
1278 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1279 amdgpu_vm_block_size);
1280 amdgpu_vm_block_size = -1;
1285 * amdgpu_device_check_vm_size - validate the vm size
1287 * @adev: amdgpu_device pointer
1289 * Validates the vm size in GB specified via module parameter.
1290 * The VM size is the size of the GPU virtual memory space in GB.
1292 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1294 /* no need to check the default value */
1295 if (amdgpu_vm_size == -1)
1298 if (amdgpu_vm_size < 1) {
1299 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1301 amdgpu_vm_size = -1;
1305 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1308 bool is_os_64 = (sizeof(void *) == 8);
1309 uint64_t total_memory;
1310 uint64_t dram_size_seven_GB = 0x1B8000000;
1311 uint64_t dram_size_three_GB = 0xB8000000;
1313 if (amdgpu_smu_memory_pool_size == 0)
1317 DRM_WARN("Not 64-bit OS, feature not supported\n");
1321 total_memory = (uint64_t)si.totalram * si.mem_unit;
1323 if ((amdgpu_smu_memory_pool_size == 1) ||
1324 (amdgpu_smu_memory_pool_size == 2)) {
1325 if (total_memory < dram_size_three_GB)
1327 } else if ((amdgpu_smu_memory_pool_size == 4) ||
1328 (amdgpu_smu_memory_pool_size == 8)) {
1329 if (total_memory < dram_size_seven_GB)
1332 DRM_WARN("Smu memory pool size not supported\n");
1335 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1340 DRM_WARN("Not enough system memory\n");
1342 adev->pm.smu_prv_buffer_size = 0;
1346 * amdgpu_device_check_arguments - validate module params
1348 * @adev: amdgpu_device pointer
1350 * Validates certain module parameters and updates
1351 * the associated values used by the driver (all asics).
1353 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1355 if (amdgpu_sched_jobs < 4) {
1356 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1358 amdgpu_sched_jobs = 4;
1359 } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1360 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1362 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1365 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1366 /* gart size must be greater or equal to 32M */
1367 dev_warn(adev->dev, "gart size (%d) too small\n",
1369 amdgpu_gart_size = -1;
1372 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1373 /* gtt size must be greater or equal to 32M */
1374 dev_warn(adev->dev, "gtt size (%d) too small\n",
1376 amdgpu_gtt_size = -1;
1379 /* valid range is between 4 and 9 inclusive */
1380 if (amdgpu_vm_fragment_size != -1 &&
1381 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1382 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1383 amdgpu_vm_fragment_size = -1;
1386 if (amdgpu_sched_hw_submission < 2) {
1387 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1388 amdgpu_sched_hw_submission);
1389 amdgpu_sched_hw_submission = 2;
1390 } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1391 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1392 amdgpu_sched_hw_submission);
1393 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1396 amdgpu_device_check_smu_prv_buffer_size(adev);
1398 amdgpu_device_check_vm_size(adev);
1400 amdgpu_device_check_block_size(adev);
1402 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1404 amdgpu_gmc_tmz_set(adev);
1406 amdgpu_gmc_noretry_set(adev);
1412 * amdgpu_switcheroo_set_state - set switcheroo state
1414 * @pdev: pci dev pointer
1415 * @state: vga_switcheroo state
1417 * Callback for the switcheroo driver. Suspends or resumes
1418 * the asic before or after it is powered up using ACPI methods.
1420 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1421 enum vga_switcheroo_state state)
1423 struct drm_device *dev = pci_get_drvdata(pdev);
1426 if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1429 if (state == VGA_SWITCHEROO_ON) {
1430 pr_info("switched on\n");
1431 /* don't suspend or resume card normally */
1432 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1434 pci_set_power_state(pdev, PCI_D0);
1435 amdgpu_device_load_pci_state(pdev);
1436 r = pci_enable_device(pdev);
1438 DRM_WARN("pci_enable_device failed (%d)\n", r);
1439 amdgpu_device_resume(dev, true);
1441 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1443 pr_info("switched off\n");
1444 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1445 amdgpu_device_suspend(dev, true);
1446 amdgpu_device_cache_pci_state(pdev);
1447 /* Shut down the device */
1448 pci_disable_device(pdev);
1449 pci_set_power_state(pdev, PCI_D3cold);
1450 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1455 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1457 * @pdev: pci dev pointer
1459 * Callback for the switcheroo driver. Checks if the switcheroo
1460 * state can be changed.
1461 * Returns true if the state can be changed, false if not.
1463 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1465 struct drm_device *dev = pci_get_drvdata(pdev);
1468 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1469 * locking inversion with the driver load path. And the access here is
1470 * completely racy anyway. So don't bother with locking for now.
1472 return atomic_read(&dev->open_count) == 0;
1475 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1476 .set_gpu_state = amdgpu_switcheroo_set_state,
1478 .can_switch = amdgpu_switcheroo_can_switch,
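/*
 * Illustrative sketch: these ops are registered with the vga_switcheroo
 * core during device init, roughly as below, where px says whether the
 * device uses ATPX (PX) power control.
 *
 *	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, px);
 */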
1482 * amdgpu_device_ip_set_clockgating_state - set the CG state
1484 * @dev: amdgpu_device pointer
1485 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1486 * @state: clockgating state (gate or ungate)
1488 * Sets the requested clockgating state for all instances of
1489 * the hardware IP specified.
1490 * Returns the error code from the last instance.
1492 int amdgpu_device_ip_set_clockgating_state(void *dev,
1493 enum amd_ip_block_type block_type,
1494 enum amd_clockgating_state state)
1496 struct amdgpu_device *adev = dev;
1499 for (i = 0; i < adev->num_ip_blocks; i++) {
1500 if (!adev->ip_blocks[i].status.valid)
1502 if (adev->ip_blocks[i].version->type != block_type)
1504 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1506 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1507 (void *)adev, state);
1509 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1510 adev->ip_blocks[i].version->funcs->name, r);
1516 * amdgpu_device_ip_set_powergating_state - set the PG state
1518 * @dev: amdgpu_device pointer
1519 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1520 * @state: powergating state (gate or ungate)
1522 * Sets the requested powergating state for all instances of
1523 * the hardware IP specified.
1524 * Returns the error code from the last instance.
1526 int amdgpu_device_ip_set_powergating_state(void *dev,
1527 enum amd_ip_block_type block_type,
1528 enum amd_powergating_state state)
1530 struct amdgpu_device *adev = dev;
1533 for (i = 0; i < adev->num_ip_blocks; i++) {
1534 if (!adev->ip_blocks[i].status.valid)
1536 if (adev->ip_blocks[i].version->type != block_type)
1538 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1540 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1541 (void *)adev, state);
1543 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1544 adev->ip_blocks[i].version->funcs->name, r);
1550 * amdgpu_device_ip_get_clockgating_state - get the CG state
1552 * @adev: amdgpu_device pointer
1553 * @flags: clockgating feature flags
1555 * Walks the list of IPs on the device and updates the clockgating
1556 * flags for each IP.
1557 * Updates @flags with the feature flags for each hardware IP where
1558 * clockgating is enabled.
1560 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1565 for (i = 0; i < adev->num_ip_blocks; i++) {
1566 if (!adev->ip_blocks[i].status.valid)
1568 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1569 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1574 * amdgpu_device_ip_wait_for_idle - wait for idle
1576 * @adev: amdgpu_device pointer
1577 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1579 * Waits for the requested hardware IP to be idle.
1580 * Returns 0 for success or a negative error code on failure.
1582 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1583 enum amd_ip_block_type block_type)
1587 for (i = 0; i < adev->num_ip_blocks; i++) {
1588 if (!adev->ip_blocks[i].status.valid)
1590 if (adev->ip_blocks[i].version->type == block_type) {
1591 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1602 * amdgpu_device_ip_is_idle - is the hardware IP idle
1604 * @adev: amdgpu_device pointer
1605 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1607 * Check if the hardware IP is idle or not.
1608 * Returns true if the IP is idle, false if not.
1610 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1611 enum amd_ip_block_type block_type)
1615 for (i = 0; i < adev->num_ip_blocks; i++) {
1616 if (!adev->ip_blocks[i].status.valid)
1618 if (adev->ip_blocks[i].version->type == block_type)
1619 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1626 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1628 * @adev: amdgpu_device pointer
1629 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1631 * Returns a pointer to the hardware IP block structure
1632 * if it exists for the asic, otherwise NULL.
1634 struct amdgpu_ip_block *
1635 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1636 enum amd_ip_block_type type)
1640 for (i = 0; i < adev->num_ip_blocks; i++)
1641 if (adev->ip_blocks[i].version->type == type)
1642 return &adev->ip_blocks[i];
1648 * amdgpu_device_ip_block_version_cmp
1650 * @adev: amdgpu_device pointer
1651 * @type: enum amd_ip_block_type
1652 * @major: major version
1653 * @minor: minor version
1655 * Returns 0 if the IP block version is equal or greater,
1656 * 1 if it is smaller or the ip_block doesn't exist.
1658 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1659 enum amd_ip_block_type type,
1660 u32 major, u32 minor)
1662 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1664 if (ip_block && ((ip_block->version->major > major) ||
1665 ((ip_block->version->major == major) &&
1666 (ip_block->version->minor >= minor))))
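/*
 * Illustrative sketch (not part of the driver): gating a code path on a
 * minimum IP version. The block type and version are arbitrary examples.
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       9, 0) == 0) {
 *		// GFX IP is version 9.0 or newer
 *	}
 */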
1673 * amdgpu_device_ip_block_add
1675 * @adev: amdgpu_device pointer
1676 * @ip_block_version: pointer to the IP to add
1678 * Adds the IP block driver information to the collection of IPs
1679 * on the asic.
1681 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1682 const struct amdgpu_ip_block_version *ip_block_version)
1684 if (!ip_block_version)
1687 switch (ip_block_version->type) {
1688 case AMD_IP_BLOCK_TYPE_VCN:
1689 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1692 case AMD_IP_BLOCK_TYPE_JPEG:
1693 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1700 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1701 ip_block_version->funcs->name);
1703 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1709 * amdgpu_device_enable_virtual_display - enable virtual display feature
1711 * @adev: amdgpu_device pointer
1713 * Enables the virtual display feature if the user has enabled it via
1714 * the module parameter virtual_display. This feature provides a virtual
1715 * display hardware on headless boards or in virtualized environments.
1716 * This function parses and validates the configuration string specified by
1717 * the user and configures the virtual display configuration (number of
1718 * virtual connectors, crtcs, etc.) specified.
1720 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1722 adev->enable_virtual_display = false;
1724 if (amdgpu_virtual_display) {
1725 const char *pci_address_name = pci_name(adev->pdev);
1726 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1728 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1729 pciaddstr_tmp = pciaddstr;
1730 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1731 pciaddname = strsep(&pciaddname_tmp, ",");
1732 if (!strcmp("all", pciaddname)
1733 || !strcmp(pci_address_name, pciaddname)) {
1737 adev->enable_virtual_display = true;
1740 res = kstrtol(pciaddname_tmp, 10,
1748 adev->mode_info.num_crtc = num_crtc;
1750 adev->mode_info.num_crtc = 1;
1756 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1757 amdgpu_virtual_display, pci_address_name,
1758 adev->enable_virtual_display, adev->mode_info.num_crtc);
1765 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1767 * @adev: amdgpu_device pointer
1769 * Parses the asic configuration parameters specified in the gpu info
1770 * firmware and makes them available to the driver for use in configuring
1771 * the asic.
1772 * Returns 0 on success, -EINVAL on failure.
1774 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1776 const char *chip_name;
1779 const struct gpu_info_firmware_header_v1_0 *hdr;
1781 adev->firmware.gpu_info_fw = NULL;
1783 if (adev->mman.discovery_bin) {
1784 amdgpu_discovery_get_gfx_info(adev);
1787 * FIXME: The bounding box is still needed by Navi12, so
1788 * temporarily read it from gpu_info firmware. Should be dropped
1789 * when DAL no longer needs it.
1791 if (adev->asic_type != CHIP_NAVI12)
1795 switch (adev->asic_type) {
1796 #ifdef CONFIG_DRM_AMDGPU_SI
1803 #ifdef CONFIG_DRM_AMDGPU_CIK
1813 case CHIP_POLARIS10:
1814 case CHIP_POLARIS11:
1815 case CHIP_POLARIS12:
1820 case CHIP_ALDEBARAN:
1821 case CHIP_SIENNA_CICHLID:
1822 case CHIP_NAVY_FLOUNDER:
1823 case CHIP_DIMGREY_CAVEFISH:
1824 case CHIP_BEIGE_GOBY:
1828 chip_name = "vega10";
1831 chip_name = "vega12";
1834 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1835 chip_name = "raven2";
1836 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1837 chip_name = "picasso";
1839 chip_name = "raven";
1842 chip_name = "arcturus";
1845 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1846 chip_name = "renoir";
1848 chip_name = "green_sardine";
1851 chip_name = "navi10";
1854 chip_name = "navi14";
1857 chip_name = "navi12";
1860 chip_name = "vangogh";
1864 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1865 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1868 "Failed to load gpu_info firmware \"%s\"\n",
1872 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1875 "Failed to validate gpu_info firmware \"%s\"\n",
1880 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1881 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1883 switch (hdr->version_major) {
1886 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1887 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1888 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1891 * Should be dropped when DAL no longer needs it.
1893 if (adev->asic_type == CHIP_NAVI12)
1894 goto parse_soc_bounding_box;
1896 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1897 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1898 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1899 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1900 adev->gfx.config.max_texture_channel_caches =
1901 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1902 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1903 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1904 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1905 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1906 adev->gfx.config.double_offchip_lds_buf =
1907 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1908 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1909 adev->gfx.cu_info.max_waves_per_simd =
1910 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1911 adev->gfx.cu_info.max_scratch_slots_per_cu =
1912 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1913 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1914 if (hdr->version_minor >= 1) {
1915 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1916 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1917 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1918 adev->gfx.config.num_sc_per_sh =
1919 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1920 adev->gfx.config.num_packer_per_sc =
1921 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1924 parse_soc_bounding_box:
1926 * soc bounding box info is not integrated into the discovery table,
1927 * so we always need to parse it from the gpu info firmware if needed.
1929 if (hdr->version_minor == 2) {
1930 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1931 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1932 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1933 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1939 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1948 * amdgpu_device_ip_early_init - run early init for hardware IPs
1950 * @adev: amdgpu_device pointer
1952 * Early initialization pass for hardware IPs. The hardware IPs that make
1953 * up each asic are discovered and each IP's early_init callback is run. This
1954 * is the first stage in initializing the asic.
1955 * Returns 0 on success, negative error code on failure.
1957 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1961 amdgpu_device_enable_virtual_display(adev);
1963 if (amdgpu_sriov_vf(adev)) {
1964 r = amdgpu_virt_request_full_gpu(adev, true);
1969 switch (adev->asic_type) {
1970 #ifdef CONFIG_DRM_AMDGPU_SI
1976 adev->family = AMDGPU_FAMILY_SI;
1977 r = si_set_ip_blocks(adev);
1982 #ifdef CONFIG_DRM_AMDGPU_CIK
1988 if (adev->flags & AMD_IS_APU)
1989 adev->family = AMDGPU_FAMILY_KV;
1991 adev->family = AMDGPU_FAMILY_CI;
1993 r = cik_set_ip_blocks(adev);
2001 case CHIP_POLARIS10:
2002 case CHIP_POLARIS11:
2003 case CHIP_POLARIS12:
2007 if (adev->flags & AMD_IS_APU)
2008 adev->family = AMDGPU_FAMILY_CZ;
2010 adev->family = AMDGPU_FAMILY_VI;
2012 r = vi_set_ip_blocks(adev);
2022 case CHIP_ALDEBARAN:
2023 if (adev->flags & AMD_IS_APU)
2024 adev->family = AMDGPU_FAMILY_RV;
2026 adev->family = AMDGPU_FAMILY_AI;
2028 r = soc15_set_ip_blocks(adev);
2035 case CHIP_SIENNA_CICHLID:
2036 case CHIP_NAVY_FLOUNDER:
2037 case CHIP_DIMGREY_CAVEFISH:
2038 case CHIP_BEIGE_GOBY:
2040 if (adev->asic_type == CHIP_VANGOGH)
2041 adev->family = AMDGPU_FAMILY_VGH;
2043 adev->family = AMDGPU_FAMILY_NV;
2045 r = nv_set_ip_blocks(adev);
2050 /* FIXME: not supported yet */
2054 amdgpu_amdkfd_device_probe(adev);
2056 adev->pm.pp_feature = amdgpu_pp_feature_mask;
2057 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2058 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2059 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2060 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2062 for (i = 0; i < adev->num_ip_blocks; i++) {
2063 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2064 DRM_ERROR("disabled ip block: %d <%s>\n",
2065 i, adev->ip_blocks[i].version->funcs->name);
2066 adev->ip_blocks[i].status.valid = false;
2068 if (adev->ip_blocks[i].version->funcs->early_init) {
2069 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2071 adev->ip_blocks[i].status.valid = false;
2073 DRM_ERROR("early_init of IP block <%s> failed %d\n",
2074 adev->ip_blocks[i].version->funcs->name, r);
2077 adev->ip_blocks[i].status.valid = true;
2080 adev->ip_blocks[i].status.valid = true;
2083 /* get the vbios after the asic_funcs are set up */
2084 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2085 r = amdgpu_device_parse_gpu_info_fw(adev);
2090 if (!amdgpu_get_bios(adev))
2093 r = amdgpu_atombios_init(adev);
2095 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2096 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2100 /* get pf2vf msg info at its earliest time */
2101 if (amdgpu_sriov_vf(adev))
2102 amdgpu_virt_init_data_exchange(adev);
2107 adev->cg_flags &= amdgpu_cg_mask;
2108 adev->pg_flags &= amdgpu_pg_mask;
2113 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2117 for (i = 0; i < adev->num_ip_blocks; i++) {
2118 if (!adev->ip_blocks[i].status.sw)
2120 if (adev->ip_blocks[i].status.hw)
2122 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2123 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2124 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2125 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2127 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2128 adev->ip_blocks[i].version->funcs->name, r);
2131 adev->ip_blocks[i].status.hw = true;
2138 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2142 for (i = 0; i < adev->num_ip_blocks; i++) {
2143 if (!adev->ip_blocks[i].status.sw)
2145 if (adev->ip_blocks[i].status.hw)
2147 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2149 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2150 adev->ip_blocks[i].version->funcs->name, r);
2153 adev->ip_blocks[i].status.hw = true;
2159 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2163 uint32_t smu_version;
2165 if (adev->asic_type >= CHIP_VEGA10) {
2166 for (i = 0; i < adev->num_ip_blocks; i++) {
2167 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2170 if (!adev->ip_blocks[i].status.sw)
2173 /* no need to do the fw loading again if already done */
2174 if (adev->ip_blocks[i].status.hw)
2177 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2178 r = adev->ip_blocks[i].version->funcs->resume(adev);
2180 DRM_ERROR("resume of IP block <%s> failed %d\n",
2181 adev->ip_blocks[i].version->funcs->name, r);
2185 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2187 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2188 adev->ip_blocks[i].version->funcs->name, r);
2193 adev->ip_blocks[i].status.hw = true;
2198 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2199 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2205 * amdgpu_device_ip_init - run init for hardware IPs
2207 * @adev: amdgpu_device pointer
2209 * Main initialization pass for hardware IPs. The list of all the hardware
2210 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2211 * are run. sw_init initializes the software state associated with each IP
2212 * and hw_init initializes the hardware associated with each IP.
2213 * Returns 0 on success, negative error code on failure.
2215 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2219 r = amdgpu_ras_init(adev);
2223 for (i = 0; i < adev->num_ip_blocks; i++) {
2224 if (!adev->ip_blocks[i].status.valid)
2226 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2228 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2229 adev->ip_blocks[i].version->funcs->name, r);
2232 adev->ip_blocks[i].status.sw = true;
2234 /* need to do gmc hw init early so we can allocate gpu mem */
2235 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2236 r = amdgpu_device_vram_scratch_init(adev);
2238 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2241 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2243 DRM_ERROR("hw_init %d failed %d\n", i, r);
2246 r = amdgpu_device_wb_init(adev);
2248 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2251 adev->ip_blocks[i].status.hw = true;
2253 /* right after GMC hw init, we create CSA */
2254 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2255 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2256 AMDGPU_GEM_DOMAIN_VRAM,
2259 DRM_ERROR("allocate CSA failed %d\n", r);
2266 if (amdgpu_sriov_vf(adev))
2267 amdgpu_virt_init_data_exchange(adev);
2269 r = amdgpu_ib_pool_init(adev);
2271 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2272 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2276 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2280 r = amdgpu_device_ip_hw_init_phase1(adev);
2284 r = amdgpu_device_fw_loading(adev);
2288 r = amdgpu_device_ip_hw_init_phase2(adev);
2293 * retired pages will be loaded from eeprom and reserved here,
2294 * it should be called after amdgpu_device_ip_hw_init_phase2 since
2295 * for some ASICs the RAS EEPROM code relies on SMU fully functioning
2296 * for I2C communication, which is only true at this point.
2298 * amdgpu_ras_recovery_init may fail, but the upper layers only care about
2299 * failures caused by a bad gpu situation and stop the amdgpu init process
2300 * accordingly. For other failure cases, it will still release all
2301 * the resources and print an error message, rather than returning a
2302 * negative value to the upper level.
2304 * Note: theoretically, this should be called before all vram allocations
2305 * to protect retired pages from being abused
2307 r = amdgpu_ras_recovery_init(adev);
2311 if (adev->gmc.xgmi.num_physical_nodes > 1)
2312 amdgpu_xgmi_add_device(adev);
2314 /* Don't init kfd if whole hive need to be reset during init */
2315 if (!adev->gmc.xgmi.pending_reset)
2316 amdgpu_amdkfd_device_init(adev);
2318 amdgpu_fru_get_product_info(adev);
2321 if (amdgpu_sriov_vf(adev))
2322 amdgpu_virt_release_full_gpu(adev, true);
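/*
 * Editor's illustration (hedged, not part of the driver): a minimal
 * sketch of an IP block as driven by the amdgpu_device_ip_init() flow
 * above. Only the amd_ip_funcs callback names (sw_init/hw_init) match
 * the real interface; the example_* helpers are hypothetical.
 */
#if 0
static int example_ip_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* allocate software state only; hardware is not touched yet */
	return example_ip_alloc_state(adev);	/* hypothetical helper */
}

static int example_ip_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* program the hardware; runs after GMC, so VRAM is usable */
	return example_ip_program_registers(adev);	/* hypothetical helper */
}

static const struct amd_ip_funcs example_ip_funcs = {
	.name = "example_ip",
	.sw_init = example_ip_sw_init,
	.hw_init = example_ip_hw_init,
};
#endif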
2328 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2330 * @adev: amdgpu_device pointer
2332 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2333 * this function before a GPU reset. If the value is retained after a
2334 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2336 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2338 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2342 * amdgpu_device_check_vram_lost - check if vram is valid
2344 * @adev: amdgpu_device pointer
2346 * Checks the reset magic value written to the gart pointer in VRAM.
2347 * The driver calls this after a GPU reset to see if the contents of
2348 * VRAM are lost or not.
2349 * Returns true if VRAM is lost, false if not.
2351 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2353 if (memcmp(adev->gart.ptr, adev->reset_magic,
2354 AMDGPU_RESET_MAGIC_NUM))
2357 if (!amdgpu_in_reset(adev))
2361 * For all ASICs with baco/mode1 reset, the VRAM is
2362 * always assumed to be lost.
2364 switch (amdgpu_asic_reset_method(adev)) {
2365 case AMD_RESET_METHOD_BACO:
2366 case AMD_RESET_METHOD_MODE1:
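/*
 * Editor's note: the two helpers above pair up around a reset. A
 * hedged sketch of the intended round trip (the reset step is
 * illustrative only):
 */
#if 0
	amdgpu_device_fill_reset_magic(adev);	/* before the reset */
	/* ... ASIC reset happens here ... */
	if (amdgpu_device_check_vram_lost(adev))
		DRM_INFO("VRAM contents were lost across the reset\n");
#endif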
2374 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2376 * @adev: amdgpu_device pointer
2377 * @state: clockgating state (gate or ungate)
2379 * The list of all the hardware IPs that make up the asic is walked and the
2380 * set_clockgating_state callbacks are run.
2381 * During the late initialization pass this enables clockgating for
2382 * hardware IPs; during fini or suspend it disables clockgating.
2383 * Returns 0 on success, negative error code on failure.
2386 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2387 enum amd_clockgating_state state)
2391 if (amdgpu_emu_mode == 1)
2394 for (j = 0; j < adev->num_ip_blocks; j++) {
2395 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2396 if (!adev->ip_blocks[i].status.late_initialized)
2398 /* skip CG for GFX on S0ix */
2399 if (adev->in_s0ix &&
2400 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2402 /* skip CG for VCE/UVD, it's handled specially */
2403 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2404 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2405 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2406 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2407 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2408 /* enable clockgating to save power */
2409 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2412 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2413 adev->ip_blocks[i].version->funcs->name, r);
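/*
 * Editor's note: the index trick above walks the IP list forward when
 * gating (late init) and backward when ungating (fini/suspend), so
 * clockgating is torn down in reverse dependency order:
 *
 *   gate:   i = j                            -> 0, 1, ..., n-1
 *   ungate: i = adev->num_ip_blocks - j - 1  -> n-1, ..., 1, 0
 */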
2422 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2423 enum amd_powergating_state state)
2427 if (amdgpu_emu_mode == 1)
2430 for (j = 0; j < adev->num_ip_blocks; j++) {
2431 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2432 if (!adev->ip_blocks[i].status.late_initialized)
2434 /* skip PG for GFX on S0ix */
2435 if (adev->in_s0ix &&
2436 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2438 /* skip PG for VCE/UVD, it's handled specially */
2439 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2440 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2441 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2442 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2443 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2444 /* enable powergating to save power */
2445 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2448 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2449 adev->ip_blocks[i].version->funcs->name, r);
2457 static int amdgpu_device_enable_mgpu_fan_boost(void)
2459 struct amdgpu_gpu_instance *gpu_ins;
2460 struct amdgpu_device *adev;
2463 mutex_lock(&mgpu_info.mutex);
2466 * MGPU fan boost feature should be enabled
2467 * only when there are two or more dGPUs in the system.
2470 if (mgpu_info.num_dgpu < 2)
2473 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2474 gpu_ins = &(mgpu_info.gpu_ins[i]);
2475 adev = gpu_ins->adev;
2476 if (!(adev->flags & AMD_IS_APU) &&
2477 !gpu_ins->mgpu_fan_enabled) {
2478 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2482 gpu_ins->mgpu_fan_enabled = 1;
2487 mutex_unlock(&mgpu_info.mutex);
2493 * amdgpu_device_ip_late_init - run late init for hardware IPs
2495 * @adev: amdgpu_device pointer
2497 * Late initialization pass for hardware IPs. The list of all the hardware
2498 * IPs that make up the asic is walked and the late_init callbacks are run.
2499 * late_init covers any special initialization that an IP requires
2500 * after all of the IPs have been initialized or something that needs to happen
2501 * late in the init process.
2502 * Returns 0 on success, negative error code on failure.
2504 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2506 struct amdgpu_gpu_instance *gpu_instance;
2509 for (i = 0; i < adev->num_ip_blocks; i++) {
2510 if (!adev->ip_blocks[i].status.hw)
2512 if (adev->ip_blocks[i].version->funcs->late_init) {
2513 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2515 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2516 adev->ip_blocks[i].version->funcs->name, r);
2520 adev->ip_blocks[i].status.late_initialized = true;
2523 amdgpu_ras_set_error_query_ready(adev, true);
2525 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2526 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2528 amdgpu_device_fill_reset_magic(adev);
2530 r = amdgpu_device_enable_mgpu_fan_boost();
2532 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2534 /* For XGMI + passthrough configuration on arcturus, enable light SBR */
2535 if (adev->asic_type == CHIP_ARCTURUS &&
2536 amdgpu_passthrough(adev) &&
2537 adev->gmc.xgmi.num_physical_nodes > 1)
2538 smu_set_light_sbr(&adev->smu, true);
2540 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2541 mutex_lock(&mgpu_info.mutex);
2544 * Reset the device p-state to low, as this was booted with it high.
2546 * This should be performed only after all devices from the same
2547 * hive get initialized.
2549 * However, the number of devices in a hive is not known in advance;
2550 * it is counted one by one as the devices initialize.
2552 * So we wait until all XGMI-interlinked devices are initialized.
2553 * This may bring some delay as those devices may come from
2554 * different hives. But that should be OK.
2556 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2557 for (i = 0; i < mgpu_info.num_gpu; i++) {
2558 gpu_instance = &(mgpu_info.gpu_ins[i]);
2559 if (gpu_instance->adev->flags & AMD_IS_APU)
2562 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2563 AMDGPU_XGMI_PSTATE_MIN);
2565 DRM_ERROR("pstate setting failed (%d).\n", r);
2571 mutex_unlock(&mgpu_info.mutex);
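/*
 * Editor's illustration (hedged): a minimal late_init callback as
 * invoked by the loop above. The example_* helper is hypothetical;
 * only the callback shape matches the amd_ip_funcs interface.
 */
#if 0
static int example_ip_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* work that must wait until every IP has finished hw_init */
	return example_ip_enable_interrupts(adev);	/* hypothetical */
}
#endif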
2578 * amdgpu_device_ip_fini - run fini for hardware IPs
2580 * @adev: amdgpu_device pointer
2582 * Main teardown pass for hardware IPs. The list of all the hardware
2583 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2584 * are run. hw_fini tears down the hardware associated with each IP
2585 * and sw_fini tears down any software state associated with each IP.
2586 * Returns 0 on success, negative error code on failure.
2588 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2592 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2593 amdgpu_virt_release_ras_err_handler_data(adev);
2595 amdgpu_ras_pre_fini(adev);
2597 if (adev->gmc.xgmi.num_physical_nodes > 1)
2598 amdgpu_xgmi_remove_device(adev);
2600 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2601 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2603 amdgpu_amdkfd_device_fini(adev);
2605 /* need to disable SMC first */
2606 for (i = 0; i < adev->num_ip_blocks; i++) {
2607 if (!adev->ip_blocks[i].status.hw)
2609 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2610 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2611 /* XXX handle errors */
2613 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2614 adev->ip_blocks[i].version->funcs->name, r);
2616 adev->ip_blocks[i].status.hw = false;
2621 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2622 if (!adev->ip_blocks[i].status.hw)
2625 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2626 /* XXX handle errors */
2628 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2629 adev->ip_blocks[i].version->funcs->name, r);
2632 adev->ip_blocks[i].status.hw = false;
2636 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2637 if (!adev->ip_blocks[i].status.sw)
2640 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2641 amdgpu_ucode_free_bo(adev);
2642 amdgpu_free_static_csa(&adev->virt.csa_obj);
2643 amdgpu_device_wb_fini(adev);
2644 amdgpu_device_vram_scratch_fini(adev);
2645 amdgpu_ib_pool_fini(adev);
2648 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2649 /* XXX handle errors */
2651 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2652 adev->ip_blocks[i].version->funcs->name, r);
2654 adev->ip_blocks[i].status.sw = false;
2655 adev->ip_blocks[i].status.valid = false;
2658 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2659 if (!adev->ip_blocks[i].status.late_initialized)
2661 if (adev->ip_blocks[i].version->funcs->late_fini)
2662 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2663 adev->ip_blocks[i].status.late_initialized = false;
2666 amdgpu_ras_fini(adev);
2668 if (amdgpu_sriov_vf(adev))
2669 if (amdgpu_virt_release_full_gpu(adev, false))
2670 DRM_ERROR("failed to release exclusive mode on fini\n");
2676 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2678 * @work: work_struct.
2680 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2682 struct amdgpu_device *adev =
2683 container_of(work, struct amdgpu_device, delayed_init_work.work);
2686 r = amdgpu_ib_ring_tests(adev);
2688 DRM_ERROR("ib ring test failed (%d).\n", r);
2691 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2693 struct amdgpu_device *adev =
2694 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2696 mutex_lock(&adev->gfx.gfx_off_mutex);
2697 if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2698 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2699 adev->gfx.gfx_off_state = true;
2701 mutex_unlock(&adev->gfx.gfx_off_mutex);
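/*
 * Editor's note: gfx_off_req_count acts as a disable-refcount.
 * Callers that need GFX awake bump it with
 * amdgpu_gfx_off_ctrl(adev, false) and drop it with
 * amdgpu_gfx_off_ctrl(adev, true); only when the count is zero does
 * the delayed work above actually enter GFXOFF. Hedged usage sketch:
 */
#if 0
	amdgpu_gfx_off_ctrl(adev, false);	/* keep GFX powered on */
	/* ... safely access GFX registers ... */
	amdgpu_gfx_off_ctrl(adev, true);	/* allow GFXOFF again */
#endif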
2705 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2707 * @adev: amdgpu_device pointer
2709 * Main suspend function for hardware IPs. The list of all the hardware
2710 * IPs that make up the asic is walked, clockgating is disabled and the
2711 * suspend callbacks are run. suspend puts the hardware and software state
2712 * in each IP into a state suitable for suspend.
2713 * Returns 0 on success, negative error code on failure.
2715 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2719 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2720 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2722 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2723 if (!adev->ip_blocks[i].status.valid)
2726 /* displays are handled separately */
2727 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2730 /* XXX handle errors */
2731 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2732 /* XXX handle errors */
2734 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2735 adev->ip_blocks[i].version->funcs->name, r);
2739 adev->ip_blocks[i].status.hw = false;
2746 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2748 * @adev: amdgpu_device pointer
2750 * Main suspend function for hardware IPs. The list of all the hardware
2751 * IPs that make up the asic is walked, clockgating is disabled and the
2752 * suspend callbacks are run. suspend puts the hardware and software state
2753 * in each IP into a state suitable for suspend.
2754 * Returns 0 on success, negative error code on failure.
2756 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2761 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
2763 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2764 if (!adev->ip_blocks[i].status.valid)
2766 /* displays are handled in phase1 */
2767 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2769 /* PSP lost connection when err_event_athub occurs */
2770 if (amdgpu_ras_intr_triggered() &&
2771 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2772 adev->ip_blocks[i].status.hw = false;
2776 /* skip unnecessary suspend if we have not initialized them yet */
2777 if (adev->gmc.xgmi.pending_reset &&
2778 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2779 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2780 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2781 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2782 adev->ip_blocks[i].status.hw = false;
2786 /* skip suspend of gfx and psp for S0ix
2787 * gfx is in gfxoff state, so on resume it will exit gfxoff just
2788 * like at runtime. PSP is also part of the always on hardware
2789 * so no need to suspend it.
2791 if (adev->in_s0ix &&
2792 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2793 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2796 /* XXX handle errors */
2797 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2798 /* XXX handle errors */
2800 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2801 adev->ip_blocks[i].version->funcs->name, r);
2803 adev->ip_blocks[i].status.hw = false;
2804 /* handle putting the SMC in the appropriate state */
2805 if (!amdgpu_sriov_vf(adev)) {
2806 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2807 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2809 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2810 adev->mp1_state, r);
2821 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2823 * @adev: amdgpu_device pointer
2825 * Main suspend function for hardware IPs. The list of all the hardware
2826 * IPs that make up the asic is walked, clockgating is disabled and the
2827 * suspend callbacks are run. suspend puts the hardware and software state
2828 * in each IP into a state suitable for suspend.
2829 * Returns 0 on success, negative error code on failure.
2831 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2835 if (amdgpu_sriov_vf(adev)) {
2836 amdgpu_virt_fini_data_exchange(adev);
2837 amdgpu_virt_request_full_gpu(adev, false);
2840 r = amdgpu_device_ip_suspend_phase1(adev);
2843 r = amdgpu_device_ip_suspend_phase2(adev);
2845 if (amdgpu_sriov_vf(adev))
2846 amdgpu_virt_release_full_gpu(adev, false);
2851 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2855 static enum amd_ip_block_type ip_order[] = {
2856 AMD_IP_BLOCK_TYPE_GMC,
2857 AMD_IP_BLOCK_TYPE_COMMON,
2858 AMD_IP_BLOCK_TYPE_PSP,
2859 AMD_IP_BLOCK_TYPE_IH,
2862 for (i = 0; i < adev->num_ip_blocks; i++) {
2864 struct amdgpu_ip_block *block;
2866 block = &adev->ip_blocks[i];
2867 block->status.hw = false;
2869 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2871 if (block->version->type != ip_order[j] ||
2872 !block->status.valid)
2875 r = block->version->funcs->hw_init(adev);
2876 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2879 block->status.hw = true;
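/*
 * Editor's note: unlike bare-metal init, the early SR-IOV re-init
 * order above brings GMC up before COMMON, likely so that guest
 * memory access works again as early as possible after a VF FLR;
 * this reading is an assumption, not documented in the code.
 */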
2886 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2890 static enum amd_ip_block_type ip_order[] = {
2891 AMD_IP_BLOCK_TYPE_SMC,
2892 AMD_IP_BLOCK_TYPE_DCE,
2893 AMD_IP_BLOCK_TYPE_GFX,
2894 AMD_IP_BLOCK_TYPE_SDMA,
2895 AMD_IP_BLOCK_TYPE_UVD,
2896 AMD_IP_BLOCK_TYPE_VCE,
2897 AMD_IP_BLOCK_TYPE_VCN
2900 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2902 struct amdgpu_ip_block *block;
2904 for (j = 0; j < adev->num_ip_blocks; j++) {
2905 block = &adev->ip_blocks[j];
2907 if (block->version->type != ip_order[i] ||
2908 !block->status.valid ||
2912 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
2913 r = block->version->funcs->resume(adev);
2915 r = block->version->funcs->hw_init(adev);
2917 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2920 block->status.hw = true;
2928 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2930 * @adev: amdgpu_device pointer
2932 * First resume function for hardware IPs. The list of all the hardware
2933 * IPs that make up the asic is walked and the resume callbacks are run for
2934 * COMMON, GMC, and IH. resume puts the hardware into a functional state
2935 * after a suspend and updates the software state as necessary. This
2936 * function is also used for restoring the GPU after a GPU reset.
2937 * Returns 0 on success, negative error code on failure.
2939 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2943 for (i = 0; i < adev->num_ip_blocks; i++) {
2944 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2946 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2947 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2948 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2950 r = adev->ip_blocks[i].version->funcs->resume(adev);
2952 DRM_ERROR("resume of IP block <%s> failed %d\n",
2953 adev->ip_blocks[i].version->funcs->name, r);
2956 adev->ip_blocks[i].status.hw = true;
2964 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2966 * @adev: amdgpu_device pointer
2968 * Second resume function for hardware IPs. The list of all the hardware
2969 * IPs that make up the asic is walked and the resume callbacks are run for
2970 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
2971 * functional state after a suspend and updates the software state as
2972 * necessary. This function is also used for restoring the GPU after a GPU reset.
2974 * Returns 0 on success, negative error code on failure.
2976 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2980 for (i = 0; i < adev->num_ip_blocks; i++) {
2981 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2983 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2984 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2985 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2986 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2988 r = adev->ip_blocks[i].version->funcs->resume(adev);
2990 DRM_ERROR("resume of IP block <%s> failed %d\n",
2991 adev->ip_blocks[i].version->funcs->name, r);
2994 adev->ip_blocks[i].status.hw = true;
3001 * amdgpu_device_ip_resume - run resume for hardware IPs
3003 * @adev: amdgpu_device pointer
3005 * Main resume function for hardware IPs. The hardware IPs
3006 * are split into two resume functions because they are
3007 * also used in recovering from a GPU reset, and some additional
3008 * steps need to be taken between them. In this case (S3/S4) they are run sequentially.
3010 * Returns 0 on success, negative error code on failure.
3012 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3016 r = amdgpu_device_ip_resume_phase1(adev);
3020 r = amdgpu_device_fw_loading(adev);
3024 r = amdgpu_device_ip_resume_phase2(adev);
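/*
 * Editor's note: resume is split so firmware can be (re)loaded once
 * the memory controller and interrupt handling are alive, but before
 * the firmware-dependent blocks resume:
 *
 *   phase1: COMMON, GMC, IH
 *   amdgpu_device_fw_loading()
 *   phase2: everything else (GFX, SDMA, ...)
 */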
3030 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3032 * @adev: amdgpu_device pointer
3034 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3036 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3038 if (amdgpu_sriov_vf(adev)) {
3039 if (adev->is_atom_fw) {
3040 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3041 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3043 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3044 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3047 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3048 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3053 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3055 * @asic_type: AMD asic type
3057 * Check if there is DC (new modesetting infrastructure) support for an asic.
3058 * returns true if DC has support, false if not.
3060 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3062 switch (asic_type) {
3063 #if defined(CONFIG_DRM_AMD_DC)
3064 #if defined(CONFIG_DRM_AMD_DC_SI)
3075 * We have systems in the wild with these ASICs that require
3076 * LVDS and VGA support which is not supported with DC.
3078 * Fall back to the non-DC driver here by default so as not to
3079 * cause regressions.
3081 return amdgpu_dc > 0;
3085 case CHIP_POLARIS10:
3086 case CHIP_POLARIS11:
3087 case CHIP_POLARIS12:
3094 #if defined(CONFIG_DRM_AMD_DC_DCN)
3100 case CHIP_SIENNA_CICHLID:
3101 case CHIP_NAVY_FLOUNDER:
3102 case CHIP_DIMGREY_CAVEFISH:
3103 case CHIP_BEIGE_GOBY:
3106 return amdgpu_dc != 0;
3110 DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3111 "but isn't supported by ASIC, ignoring\n");
3117 * amdgpu_device_has_dc_support - check if dc is supported
3119 * @adev: amdgpu_device pointer
3121 * Returns true for supported, false for not supported
3123 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3125 if (amdgpu_sriov_vf(adev) ||
3126 adev->enable_virtual_display ||
3127 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3130 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3133 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3135 struct amdgpu_device *adev =
3136 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3137 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3139 /* It's a bug to not have a hive within this function */
3144 * Use task barrier to synchronize all xgmi reset works across the
3145 * hive. task_barrier_enter and task_barrier_exit will block
3146 * until all the threads running the xgmi reset works reach
3147 * those points. task_barrier_full will do both blocks.
3149 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3151 task_barrier_enter(&hive->tb);
3152 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3154 if (adev->asic_reset_res)
3157 task_barrier_exit(&hive->tb);
3158 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3160 if (adev->asic_reset_res)
3163 if (adev->mmhub.ras_funcs &&
3164 adev->mmhub.ras_funcs->reset_ras_error_count)
3165 adev->mmhub.ras_funcs->reset_ras_error_count(adev);
3168 task_barrier_full(&hive->tb);
3169 adev->asic_reset_res = amdgpu_asic_reset(adev);
3173 if (adev->asic_reset_res)
3174 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3175 adev->asic_reset_res, adev_to_drm(adev)->unique);
3176 amdgpu_put_xgmi_hive(hive);
3179 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3181 char *input = amdgpu_lockup_timeout;
3182 char *timeout_setting = NULL;
3188 * By default the timeout for non-compute jobs is 10000
3189 * and 60000 for compute jobs.
3190 * In SR-IOV or passthrough mode, the timeout for compute
3191 * jobs is 60000 by default.
3193 adev->gfx_timeout = msecs_to_jiffies(10000);
3194 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3195 if (amdgpu_sriov_vf(adev))
3196 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3197 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3199 adev->compute_timeout = msecs_to_jiffies(60000);
3201 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3202 while ((timeout_setting = strsep(&input, ",")) &&
3203 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3204 ret = kstrtol(timeout_setting, 0, &timeout);
3211 } else if (timeout < 0) {
3212 timeout = MAX_SCHEDULE_TIMEOUT;
3214 timeout = msecs_to_jiffies(timeout);
3219 adev->gfx_timeout = timeout;
3222 adev->compute_timeout = timeout;
3225 adev->sdma_timeout = timeout;
3228 adev->video_timeout = timeout;
3235 * There is only one value specified and
3236 * it should apply to all non-compute jobs.
3239 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3240 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3241 adev->compute_timeout = adev->gfx_timeout;
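/*
 * Editor's illustration of the parsing above (values in ms; a
 * negative value maps to MAX_SCHEDULE_TIMEOUT, i.e. no timeout):
 *
 *   amdgpu.lockup_timeout=7000
 *       a single value applies to all non-compute queues (and, under
 *       SR-IOV or passthrough, to compute as well)
 *   amdgpu.lockup_timeout=10000,60000,10000,10000
 *       per-queue values in the order gfx, compute, sdma, video
 */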
3248 static const struct attribute *amdgpu_dev_attributes[] = {
3249 &dev_attr_product_name.attr,
3250 &dev_attr_product_number.attr,
3251 &dev_attr_serial_number.attr,
3252 &dev_attr_pcie_replay_count.attr,
3258 * amdgpu_device_init - initialize the driver
3260 * @adev: amdgpu_device pointer
3261 * @flags: driver flags
3263 * Initializes the driver info and hw (all asics).
3264 * Returns 0 for success or an error on failure.
3265 * Called at driver startup.
3267 int amdgpu_device_init(struct amdgpu_device *adev,
3270 struct drm_device *ddev = adev_to_drm(adev);
3271 struct pci_dev *pdev = adev->pdev;
3276 adev->shutdown = false;
3277 adev->flags = flags;
3279 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3280 adev->asic_type = amdgpu_force_asic_type;
3282 adev->asic_type = flags & AMD_ASIC_MASK;
3284 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3285 if (amdgpu_emu_mode == 1)
3286 adev->usec_timeout *= 10;
3287 adev->gmc.gart_size = 512 * 1024 * 1024;
3288 adev->accel_working = false;
3289 adev->num_rings = 0;
3290 adev->mman.buffer_funcs = NULL;
3291 adev->mman.buffer_funcs_ring = NULL;
3292 adev->vm_manager.vm_pte_funcs = NULL;
3293 adev->vm_manager.vm_pte_num_scheds = 0;
3294 adev->gmc.gmc_funcs = NULL;
3295 adev->harvest_ip_mask = 0x0;
3296 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3297 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3299 adev->smc_rreg = &amdgpu_invalid_rreg;
3300 adev->smc_wreg = &amdgpu_invalid_wreg;
3301 adev->pcie_rreg = &amdgpu_invalid_rreg;
3302 adev->pcie_wreg = &amdgpu_invalid_wreg;
3303 adev->pciep_rreg = &amdgpu_invalid_rreg;
3304 adev->pciep_wreg = &amdgpu_invalid_wreg;
3305 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3306 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3307 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3308 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3309 adev->didt_rreg = &amdgpu_invalid_rreg;
3310 adev->didt_wreg = &amdgpu_invalid_wreg;
3311 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3312 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3313 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3314 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3316 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3317 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3318 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3320 /* mutex initializations are all done here, so we
3321 * can call these functions again without locking issues */
3322 mutex_init(&adev->firmware.mutex);
3323 mutex_init(&adev->pm.mutex);
3324 mutex_init(&adev->gfx.gpu_clock_mutex);
3325 mutex_init(&adev->srbm_mutex);
3326 mutex_init(&adev->gfx.pipe_reserve_mutex);
3327 mutex_init(&adev->gfx.gfx_off_mutex);
3328 mutex_init(&adev->grbm_idx_mutex);
3329 mutex_init(&adev->mn_lock);
3330 mutex_init(&adev->virt.vf_errors.lock);
3331 hash_init(adev->mn_hash);
3332 atomic_set(&adev->in_gpu_reset, 0);
3333 init_rwsem(&adev->reset_sem);
3334 mutex_init(&adev->psp.mutex);
3335 mutex_init(&adev->notifier_lock);
3337 r = amdgpu_device_check_arguments(adev);
3341 spin_lock_init(&adev->mmio_idx_lock);
3342 spin_lock_init(&adev->smc_idx_lock);
3343 spin_lock_init(&adev->pcie_idx_lock);
3344 spin_lock_init(&adev->uvd_ctx_idx_lock);
3345 spin_lock_init(&adev->didt_idx_lock);
3346 spin_lock_init(&adev->gc_cac_idx_lock);
3347 spin_lock_init(&adev->se_cac_idx_lock);
3348 spin_lock_init(&adev->audio_endpt_idx_lock);
3349 spin_lock_init(&adev->mm_stats.lock);
3351 INIT_LIST_HEAD(&adev->shadow_list);
3352 mutex_init(&adev->shadow_list_lock);
3354 INIT_LIST_HEAD(&adev->reset_list);
3356 INIT_DELAYED_WORK(&adev->delayed_init_work,
3357 amdgpu_device_delayed_init_work_handler);
3358 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3359 amdgpu_device_delay_enable_gfx_off);
3361 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3363 adev->gfx.gfx_off_req_count = 1;
3364 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3366 atomic_set(&adev->throttling_logging_enabled, 1);
3368 * If throttling continues, logging will be performed every minute
3369 * to avoid log flooding. "-1" is subtracted since the thermal
3370 * throttling interrupt comes every second. Thus, the total logging
3371 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3372 * for throttling interrupt) = 60 seconds.
3374 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3375 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3377 /* Registers mapping */
3378 /* TODO: block userspace mapping of io register */
3379 if (adev->asic_type >= CHIP_BONAIRE) {
3380 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3381 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3383 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3384 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3387 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3388 if (adev->rmmio == NULL) {
3391 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3392 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3394 /* enable PCIE atomic ops */
3395 r = pci_enable_atomic_ops_to_root(adev->pdev,
3396 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3397 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3399 adev->have_atomics_support = false;
3400 DRM_INFO("PCIE atomic ops is not supported\n");
3402 adev->have_atomics_support = true;
3405 amdgpu_device_get_pcie_info(adev);
3408 DRM_INFO("MCBP is enabled\n");
3410 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3411 adev->enable_mes = true;
3413 /* detect hw virtualization here */
3414 amdgpu_detect_virtualization(adev);
3416 r = amdgpu_device_get_job_timeout_settings(adev);
3418 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3422 /* early init functions */
3423 r = amdgpu_device_ip_early_init(adev);
3428 /* doorbell bar mapping and doorbell index init */
3428 amdgpu_device_doorbell_init(adev);
3430 if (amdgpu_emu_mode == 1) {
3431 /* post the asic on emulation mode */
3432 emu_soc_asic_init(adev);
3433 goto fence_driver_init;
3436 amdgpu_reset_init(adev);
3438 /* detect if we are with an SRIOV vbios */
3439 amdgpu_device_detect_sriov_bios(adev);
3441 /* check if we need to reset the asic
3442 * E.g., driver was not cleanly unloaded previously, etc.
3444 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3445 if (adev->gmc.xgmi.num_physical_nodes) {
3446 dev_info(adev->dev, "Pending hive reset.\n");
3447 adev->gmc.xgmi.pending_reset = true;
3448 /* Only need to init the necessary blocks for SMU to handle the reset */
3449 for (i = 0; i < adev->num_ip_blocks; i++) {
3450 if (!adev->ip_blocks[i].status.valid)
3452 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3453 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3454 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3455 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3456 DRM_DEBUG("IP %s disabled for hw_init.\n",
3457 adev->ip_blocks[i].version->funcs->name);
3458 adev->ip_blocks[i].status.hw = true;
3462 r = amdgpu_asic_reset(adev);
3464 dev_err(adev->dev, "asic reset on init failed\n");
3470 pci_enable_pcie_error_reporting(adev->pdev);
3472 /* Post card if necessary */
3473 if (amdgpu_device_need_post(adev)) {
3475 dev_err(adev->dev, "no vBIOS found\n");
3479 DRM_INFO("GPU posting now...\n");
3480 r = amdgpu_device_asic_init(adev);
3482 dev_err(adev->dev, "gpu post error!\n");
3487 if (adev->is_atom_fw) {
3488 /* Initialize clocks */
3489 r = amdgpu_atomfirmware_get_clock_info(adev);
3491 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3492 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3496 /* Initialize clocks */
3497 r = amdgpu_atombios_get_clock_info(adev);
3499 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3500 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3503 /* init i2c buses */
3504 if (!amdgpu_device_has_dc_support(adev))
3505 amdgpu_atombios_i2c_init(adev);
3510 r = amdgpu_fence_driver_init(adev);
3512 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
3513 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3517 /* init the mode config */
3518 drm_mode_config_init(adev_to_drm(adev));
3520 r = amdgpu_device_ip_init(adev);
3522 /* failed in exclusive mode due to timeout */
3523 if (amdgpu_sriov_vf(adev) &&
3524 !amdgpu_sriov_runtime(adev) &&
3525 amdgpu_virt_mmio_blocked(adev) &&
3526 !amdgpu_virt_wait_reset(adev)) {
3527 dev_err(adev->dev, "VF exclusive mode timeout\n");
3528 /* Don't send request since VF is inactive. */
3529 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3530 adev->virt.ops = NULL;
3532 goto release_ras_con;
3534 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3535 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3536 goto release_ras_con;
3540 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3541 adev->gfx.config.max_shader_engines,
3542 adev->gfx.config.max_sh_per_se,
3543 adev->gfx.config.max_cu_per_sh,
3544 adev->gfx.cu_info.number);
3546 adev->accel_working = true;
3548 amdgpu_vm_check_compute_bug(adev);
3550 /* Initialize the buffer migration limit. */
3551 if (amdgpu_moverate >= 0)
3552 max_MBps = amdgpu_moverate;
3554 max_MBps = 8; /* Allow 8 MB/s. */
3555 /* Get a log2 for easy divisions. */
3556 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3558 amdgpu_fbdev_init(adev);
3560 r = amdgpu_pm_sysfs_init(adev);
3562 adev->pm_sysfs_en = false;
3563 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3565 adev->pm_sysfs_en = true;
3567 r = amdgpu_ucode_sysfs_init(adev);
3569 adev->ucode_sysfs_en = false;
3570 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3572 adev->ucode_sysfs_en = true;
3574 if ((amdgpu_testing & 1)) {
3575 if (adev->accel_working)
3576 amdgpu_test_moves(adev);
3578 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3580 if (amdgpu_benchmarking) {
3581 if (adev->accel_working)
3582 amdgpu_benchmark(adev, amdgpu_benchmarking);
3584 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3588 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3589 * Otherwise the mgpu fan boost feature will be skipped because the
3590 * gpu instance count would be too low.
3592 amdgpu_register_gpu_instance(adev);
3594 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3595 * explicit gating rather than handling it automatically.
3597 if (!adev->gmc.xgmi.pending_reset) {
3598 r = amdgpu_device_ip_late_init(adev);
3600 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3601 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3602 goto release_ras_con;
3605 amdgpu_ras_resume(adev);
3606 queue_delayed_work(system_wq, &adev->delayed_init_work,
3607 msecs_to_jiffies(AMDGPU_RESUME_MS));
3610 if (amdgpu_sriov_vf(adev))
3611 flush_delayed_work(&adev->delayed_init_work);
3613 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3615 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3617 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3618 r = amdgpu_pmu_init(adev);
3620 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3622 /* Have stored pci confspace at hand for restore in sudden PCI error */
3623 if (amdgpu_device_cache_pci_state(adev->pdev))
3624 pci_restore_state(pdev);
3626 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3627 /* this will fail for cards that aren't VGA class devices, just don't care about it */
3629 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3630 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
3632 if (amdgpu_device_supports_px(ddev)) {
3634 vga_switcheroo_register_client(adev->pdev,
3635 &amdgpu_switcheroo_ops, px);
3636 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3639 if (adev->gmc.xgmi.pending_reset)
3640 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3641 msecs_to_jiffies(AMDGPU_RESUME_MS));
3646 amdgpu_release_ras_context(adev);
3649 amdgpu_vf_error_trans_all(adev);
3652 iounmap(adev->rmmio);
3659 * amdgpu_device_fini - tear down the driver
3661 * @adev: amdgpu_device pointer
3663 * Tear down the driver info (all asics).
3664 * Called at driver shutdown.
3666 void amdgpu_device_fini(struct amdgpu_device *adev)
3668 dev_info(adev->dev, "amdgpu: finishing device.\n");
3669 flush_delayed_work(&adev->delayed_init_work);
3670 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3671 adev->shutdown = true;
3673 kfree(adev->pci_state);
3675 /* make sure the IB tests have finished before entering exclusive mode
3676 * to avoid preemption during the IB tests
3678 if (amdgpu_sriov_vf(adev)) {
3679 amdgpu_virt_request_full_gpu(adev, false);
3680 amdgpu_virt_fini_data_exchange(adev);
3683 /* disable all interrupts */
3684 amdgpu_irq_disable_all(adev);
3685 if (adev->mode_info.mode_config_initialized){
3686 if (!amdgpu_device_has_dc_support(adev))
3687 drm_helper_force_disable_all(adev_to_drm(adev));
3689 drm_atomic_helper_shutdown(adev_to_drm(adev));
3691 amdgpu_fence_driver_fini(adev);
3692 if (adev->pm_sysfs_en)
3693 amdgpu_pm_sysfs_fini(adev);
3694 amdgpu_fbdev_fini(adev);
3695 amdgpu_device_ip_fini(adev);
3696 release_firmware(adev->firmware.gpu_info_fw);
3697 adev->firmware.gpu_info_fw = NULL;
3698 adev->accel_working = false;
3700 amdgpu_reset_fini(adev);
3702 /* free i2c buses */
3703 if (!amdgpu_device_has_dc_support(adev))
3704 amdgpu_i2c_fini(adev);
3706 if (amdgpu_emu_mode != 1)
3707 amdgpu_atombios_fini(adev);
3711 if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3712 vga_switcheroo_unregister_client(adev->pdev);
3713 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3715 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3716 vga_client_register(adev->pdev, NULL, NULL, NULL);
3717 iounmap(adev->rmmio);
3719 amdgpu_device_doorbell_fini(adev);
3721 if (adev->ucode_sysfs_en)
3722 amdgpu_ucode_sysfs_fini(adev);
3724 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3725 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3726 amdgpu_pmu_fini(adev);
3727 if (adev->mman.discovery_bin)
3728 amdgpu_discovery_fini(adev);
3736 * amdgpu_device_suspend - initiate device suspend
3738 * @dev: drm dev pointer
3739 * @fbcon: notify the fbdev of suspend
3741 * Puts the hw in the suspend state (all asics).
3742 * Returns 0 for success or an error on failure.
3743 * Called at driver suspend.
3745 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3747 struct amdgpu_device *adev = drm_to_adev(dev);
3749 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3752 adev->in_suspend = true;
3753 drm_kms_helper_poll_disable(dev);
3756 amdgpu_fbdev_set_suspend(adev, 1);
3758 cancel_delayed_work_sync(&adev->delayed_init_work);
3760 amdgpu_ras_suspend(adev);
3762 amdgpu_device_ip_suspend_phase1(adev);
3765 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
3767 /* evict vram memory */
3768 amdgpu_bo_evict_vram(adev);
3770 amdgpu_fence_driver_suspend(adev);
3772 amdgpu_device_ip_suspend_phase2(adev);
3773 /* evict remaining vram memory
3774 * This second call to evict vram is to evict the gart page table
3777 amdgpu_bo_evict_vram(adev);
3783 * amdgpu_device_resume - initiate device resume
3785 * @dev: drm dev pointer
3786 * @fbcon: notify the fbdev of resume
3788 * Bring the hw back to operating state (all asics).
3789 * Returns 0 for success or an error on failure.
3790 * Called at driver resume.
3792 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3794 struct amdgpu_device *adev = drm_to_adev(dev);
3797 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3801 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3804 if (amdgpu_device_need_post(adev)) {
3805 r = amdgpu_device_asic_init(adev);
3807 dev_err(adev->dev, "amdgpu asic init failed\n");
3810 r = amdgpu_device_ip_resume(adev);
3812 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3815 amdgpu_fence_driver_resume(adev);
3818 r = amdgpu_device_ip_late_init(adev);
3822 queue_delayed_work(system_wq, &adev->delayed_init_work,
3823 msecs_to_jiffies(AMDGPU_RESUME_MS));
3825 if (!adev->in_s0ix) {
3826 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
3831 /* Make sure IB tests flushed */
3832 flush_delayed_work(&adev->delayed_init_work);
3835 amdgpu_fbdev_set_suspend(adev, 0);
3837 drm_kms_helper_poll_enable(dev);
3839 amdgpu_ras_resume(adev);
3842 * Most of the connector probing functions try to acquire runtime pm
3843 * refs to ensure that the GPU is powered on when connector polling is
3844 * performed. Since we're calling this from a runtime PM callback,
3845 * trying to acquire rpm refs will cause us to deadlock.
3847 * Since we're guaranteed to be holding the rpm lock, it's safe to
3848 * temporarily disable the rpm helpers so this doesn't deadlock us.
3851 dev->dev->power.disable_depth++;
3853 if (!amdgpu_device_has_dc_support(adev))
3854 drm_helper_hpd_irq_event(dev);
3856 drm_kms_helper_hotplug_event(dev);
3858 dev->dev->power.disable_depth--;
3860 adev->in_suspend = false;
3866 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3868 * @adev: amdgpu_device pointer
3870 * The list of all the hardware IPs that make up the asic is walked and
3871 * the check_soft_reset callbacks are run. check_soft_reset determines
3872 * if the asic is still hung or not.
3873 * Returns true if any of the IPs are still in a hung state, false if not.
3875 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3878 bool asic_hang = false;
3880 if (amdgpu_sriov_vf(adev))
3883 if (amdgpu_asic_need_full_reset(adev))
3886 for (i = 0; i < adev->num_ip_blocks; i++) {
3887 if (!adev->ip_blocks[i].status.valid)
3889 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3890 adev->ip_blocks[i].status.hang =
3891 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3892 if (adev->ip_blocks[i].status.hang) {
3893 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3901 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3903 * @adev: amdgpu_device pointer
3905 * The list of all the hardware IPs that make up the asic is walked and the
3906 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
3907 * handles any IP specific hardware or software state changes that are
3908 * necessary for a soft reset to succeed.
3909 * Returns 0 on success, negative error code on failure.
3911 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3915 for (i = 0; i < adev->num_ip_blocks; i++) {
3916 if (!adev->ip_blocks[i].status.valid)
3918 if (adev->ip_blocks[i].status.hang &&
3919 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3920 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3930 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3932 * @adev: amdgpu_device pointer
3934 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
3935 * reset is necessary to recover.
3936 * Returns true if a full asic reset is required, false if not.
3938 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3942 if (amdgpu_asic_need_full_reset(adev))
3945 for (i = 0; i < adev->num_ip_blocks; i++) {
3946 if (!adev->ip_blocks[i].status.valid)
3948 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3949 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3950 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3951 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3952 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3953 if (adev->ip_blocks[i].status.hang) {
3954 dev_info(adev->dev, "Some block need full reset!\n");
3963 * amdgpu_device_ip_soft_reset - do a soft reset
3965 * @adev: amdgpu_device pointer
3967 * The list of all the hardware IPs that make up the asic is walked and the
3968 * soft_reset callbacks are run if the block is hung. soft_reset handles any
3969 * IP specific hardware or software state changes that are necessary to soft reset the IP.
3971 * Returns 0 on success, negative error code on failure.
3973 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3977 for (i = 0; i < adev->num_ip_blocks; i++) {
3978 if (!adev->ip_blocks[i].status.valid)
3980 if (adev->ip_blocks[i].status.hang &&
3981 adev->ip_blocks[i].version->funcs->soft_reset) {
3982 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3992 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3994 * @adev: amdgpu_device pointer
3996 * The list of all the hardware IPs that make up the asic is walked and the
3997 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
3998 * handles any IP specific hardware or software state changes that are
3999 * necessary after the IP has been soft reset.
4000 * Returns 0 on success, negative error code on failure.
4002 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4006 for (i = 0; i < adev->num_ip_blocks; i++) {
4007 if (!adev->ip_blocks[i].status.valid)
4009 if (adev->ip_blocks[i].status.hang &&
4010 adev->ip_blocks[i].version->funcs->post_soft_reset)
4011 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4020 * amdgpu_device_recover_vram - Recover some VRAM contents
4022 * @adev: amdgpu_device pointer
4024 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4025 * restore things like GPUVM page tables after a GPU reset where
4026 * the contents of VRAM might be lost.
4029 * 0 on success, negative error code on failure.
4031 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4033 struct dma_fence *fence = NULL, *next = NULL;
4034 struct amdgpu_bo *shadow;
4037 if (amdgpu_sriov_runtime(adev))
4038 tmo = msecs_to_jiffies(8000);
4040 tmo = msecs_to_jiffies(100);
4042 dev_info(adev->dev, "recover vram bo from shadow start\n");
4043 mutex_lock(&adev->shadow_list_lock);
4044 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
4046 /* No need to recover an evicted BO */
4047 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
4048 shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
4049 shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
4052 r = amdgpu_bo_restore_shadow(shadow, &next);
4057 tmo = dma_fence_wait_timeout(fence, false, tmo);
4058 dma_fence_put(fence);
4063 } else if (tmo < 0) {
4071 mutex_unlock(&adev->shadow_list_lock);
4074 tmo = dma_fence_wait_timeout(fence, false, tmo);
4075 dma_fence_put(fence);
4077 if (r < 0 || tmo <= 0) {
4078 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4082 dev_info(adev->dev, "recover vram bo from shadow done\n");
4088 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4090 * @adev: amdgpu_device pointer
4091 * @from_hypervisor: request from hypervisor
4093 * Do a VF FLR and reinitialize the ASIC.
4094 * Returns 0 on success, an error code otherwise.
4096 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4097 bool from_hypervisor)
4101 if (from_hypervisor)
4102 r = amdgpu_virt_request_full_gpu(adev, true);
4104 r = amdgpu_virt_reset_gpu(adev);
4108 amdgpu_amdkfd_pre_reset(adev);
4110 /* Resume IP prior to SMC */
4111 r = amdgpu_device_ip_reinit_early_sriov(adev);
4115 amdgpu_virt_init_data_exchange(adev);
4116 /* we need to recover the gart prior to running SMC/CP/SDMA resume */
4117 amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4119 r = amdgpu_device_fw_loading(adev);
4123 /* now we are okay to resume SMC/CP/SDMA */
4124 r = amdgpu_device_ip_reinit_late_sriov(adev);
4128 amdgpu_irq_gpu_reset_resume_helper(adev);
4129 r = amdgpu_ib_ring_tests(adev);
4130 amdgpu_amdkfd_post_reset(adev);
4133 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4134 amdgpu_inc_vram_lost(adev);
4135 r = amdgpu_device_recover_vram(adev);
4137 amdgpu_virt_release_full_gpu(adev, true);
4143 * amdgpu_device_has_job_running - check if there is any job in mirror list
4145 * @adev: amdgpu_device pointer
4147 * Check if there is any job in the mirror list.
4149 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4152 struct drm_sched_job *job;
4154 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4155 struct amdgpu_ring *ring = adev->rings[i];
4157 if (!ring || !ring->sched.thread)
4160 spin_lock(&ring->sched.job_list_lock);
4161 job = list_first_entry_or_null(&ring->sched.pending_list,
4162 struct drm_sched_job, list);
4163 spin_unlock(&ring->sched.job_list_lock);
4171 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4173 * @adev: amdgpu_device pointer
4175 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover the ASIC to a functional state.
4178 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4180 if (!amdgpu_device_ip_check_soft_reset(adev)) {
4181 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4185 if (amdgpu_gpu_recovery == 0)
4188 if (amdgpu_sriov_vf(adev))
4191 if (amdgpu_gpu_recovery == -1) {
4192 switch (adev->asic_type) {
4198 case CHIP_POLARIS10:
4199 case CHIP_POLARIS11:
4200 case CHIP_POLARIS12:
4211 case CHIP_SIENNA_CICHLID:
4212 case CHIP_NAVY_FLOUNDER:
4213 case CHIP_DIMGREY_CAVEFISH:
4215 case CHIP_ALDEBARAN:
4225 dev_info(adev->dev, "GPU recovery disabled.\n");
4229 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4234 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4236 dev_info(adev->dev, "GPU mode1 reset\n");
4239 pci_clear_master(adev->pdev);
4241 amdgpu_device_cache_pci_state(adev->pdev);
4243 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4244 dev_info(adev->dev, "GPU smu mode1 reset\n");
4245 ret = amdgpu_dpm_mode1_reset(adev);
4247 dev_info(adev->dev, "GPU psp mode1 reset\n");
4248 ret = psp_gpu_reset(adev);
4252 dev_err(adev->dev, "GPU mode1 reset failed\n");
4254 amdgpu_device_load_pci_state(adev->pdev);
4256 /* wait for asic to come out of reset */
4257 for (i = 0; i < adev->usec_timeout; i++) {
4258 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4260 if (memsize != 0xffffffff)
4265 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
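/*
 * Editor's note: while the ASIC is still in reset, MMIO reads
 * typically return all 1s, so polling get_memsize() for anything
 * other than 0xffffffff serves as a cheap "out of reset" probe in
 * the loop above.
 */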
4269 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4270 struct amdgpu_reset_context *reset_context)
4273 struct amdgpu_job *job = NULL;
4274 bool need_full_reset =
4275 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4277 if (reset_context->reset_req_dev == adev)
4278 job = reset_context->job;
4280 /* no need to dump if the device is not in a good state during the probe period */
4281 if (!adev->gmc.xgmi.pending_reset)
4282 amdgpu_debugfs_wait_dump(adev);
4284 if (amdgpu_sriov_vf(adev)) {
4285 /* stop the data exchange thread */
4286 amdgpu_virt_fini_data_exchange(adev);
4289 /* block all schedulers and reset given job's ring */
4290 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4291 struct amdgpu_ring *ring = adev->rings[i];
4293 if (!ring || !ring->sched.thread)
4296 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4297 amdgpu_fence_driver_force_completion(ring);
4301 drm_sched_increase_karma(&job->base);
4303 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4304 /* If reset handler not implemented, continue; otherwise return */
4310 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4311 if (!amdgpu_sriov_vf(adev)) {
4313 if (!need_full_reset)
4314 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4316 if (!need_full_reset) {
4317 amdgpu_device_ip_pre_soft_reset(adev);
4318 r = amdgpu_device_ip_soft_reset(adev);
4319 amdgpu_device_ip_post_soft_reset(adev);
4320 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4321 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4322 need_full_reset = true;
4326 if (need_full_reset)
4327 r = amdgpu_device_ip_suspend(adev);
4328 if (need_full_reset)
4329 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4331 clear_bit(AMDGPU_NEED_FULL_RESET,
4332 &reset_context->flags);
4338 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4339 struct amdgpu_reset_context *reset_context)
4341 struct amdgpu_device *tmp_adev = NULL;
4342 bool need_full_reset, skip_hw_reset, vram_lost = false;
4345 /* Try reset handler method first */
4346 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4348 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4349 /* If reset handler not implemented, continue; otherwise return */
4355 /* Reset handler not implemented, use the default method */
4357 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4358 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4361 * ASIC reset has to be done on all XGMI hive nodes ASAP
4362 * to allow proper link negotiation in FW (within 1 sec)
4364 if (!skip_hw_reset && need_full_reset) {
4365 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4366 /* For XGMI run all resets in parallel to speed up the process */
4367 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4368 tmp_adev->gmc.xgmi.pending_reset = false;
4369 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4372 r = amdgpu_asic_reset(tmp_adev);
4375 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4376 r, adev_to_drm(tmp_adev)->unique);
4381 /* For XGMI wait for all resets to complete before proceed */
4383 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4384 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4385 flush_work(&tmp_adev->xgmi_reset_work);
4386 r = tmp_adev->asic_reset_res;
4394 if (!r && amdgpu_ras_intr_triggered()) {
4395 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4396 if (tmp_adev->mmhub.ras_funcs &&
4397 tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
4398 tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
4401 amdgpu_ras_intr_cleared();
4404 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4405 if (need_full_reset) {
4407 r = amdgpu_device_asic_init(tmp_adev);
4409 dev_warn(tmp_adev->dev, "asic atom init failed!");
4411 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4412 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4416 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4418 DRM_INFO("VRAM is lost due to GPU reset!\n");
4419 amdgpu_inc_vram_lost(tmp_adev);
4422 r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4426 r = amdgpu_device_fw_loading(tmp_adev);
4430 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4435 amdgpu_device_fill_reset_magic(tmp_adev);
4438 * Add this ASIC as tracked, as the reset already
4439 * completed successfully.
4441 amdgpu_register_gpu_instance(tmp_adev);
4443 if (!reset_context->hive &&
4444 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4445 amdgpu_xgmi_add_device(tmp_adev);
4447 r = amdgpu_device_ip_late_init(tmp_adev);
4451 amdgpu_fbdev_set_suspend(tmp_adev, 0);
4454 * The GPU enters a bad state once the number of
4455 * faulty pages flagged by ECC reaches the threshold,
4456 * and RAS recovery is scheduled next. So add a check
4457 * here to break recovery if it indeed exceeds the
4458 * bad page threshold, and remind the user to
4459 * retire this GPU or set a bigger
4460 * bad_page_threshold value to fix this when
4461 * probing the driver again.
4463 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4465 amdgpu_ras_resume(tmp_adev);
4471 /* Update PSP FW topology after reset */
4472 if (reset_context->hive &&
4473 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4474 r = amdgpu_xgmi_update_topology(
4475 reset_context->hive, tmp_adev);
4481 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4482 r = amdgpu_ib_ring_tests(tmp_adev);
4484 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4485 need_full_reset = true;
4492 r = amdgpu_device_recover_vram(tmp_adev);
4494 tmp_adev->asic_reset_res = r;
4498 if (need_full_reset)
4499 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4501 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);

static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
				    struct amdgpu_hive_info *hive)
{
	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
		return false;

	if (hive) {
		down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
	} else {
		down_write(&adev->reset_sem);
	}

	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_MODE1:
		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
		break;
	case AMD_RESET_METHOD_MODE2:
		adev->mp1_state = PP_MP1_STATE_RESET;
		break;
	default:
		adev->mp1_state = PP_MP1_STATE_NONE;
		break;
	}

	return true;
}

static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
	amdgpu_vf_error_trans_all(adev);
	adev->mp1_state = PP_MP1_STATE_NONE;
	atomic_set(&adev->in_gpu_reset, 0);
	up_write(&adev->reset_sem);
}
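
/*
 * Locking sketch (illustrative): amdgpu_device_lock_adev() behaves as a
 * trylock via the in_gpu_reset cmpxchg, so callers must not touch any
 * reset state when it fails and must pair every successful lock with
 * amdgpu_device_unlock_adev():
 *
 *	if (!amdgpu_device_lock_adev(adev, hive))
 *		return -EAGAIN;
 *	... perform the reset ...
 *	amdgpu_device_unlock_adev(adev);
 */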

/*
 * Lock a list of amdgpu devices in a hive safely. If this is not a hive
 * with multiple nodes, it behaves like amdgpu_device_lock_adev.
 *
 * unlock won't require a roll back.
 */
static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
{
	struct amdgpu_device *tmp_adev = NULL;

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		if (!hive) {
			dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
			return -ENODEV;
		}
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			if (!amdgpu_device_lock_adev(tmp_adev, hive))
				goto roll_back;
		}
	} else if (!amdgpu_device_lock_adev(adev, hive))
		return -EAGAIN;

	return 0;
roll_back:
	if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
		/*
		 * If the lock iteration broke in the middle of a hive,
		 * there may be a race, or a hive device may have locked
		 * up independently. We may or may not be in trouble, so
		 * roll back the locks taken so far and emit a warning.
		 */
		dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
		list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			amdgpu_device_unlock_adev(tmp_adev);
		}
	}
	return -EAGAIN;
}

static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;

	/* The HD audio codec is function 1 of the GPU's PCI device */
	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
			adev->pdev->bus->number, 1);
	if (p) {
		pm_runtime_enable(&(p->dev));
		pm_runtime_resume(&(p->dev));
	}
}

static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
{
	enum amd_reset_method reset_method;
	struct pci_dev *p = NULL;
	u64 expires;

	/*
	 * For now, only BACO and mode1 reset are confirmed
	 * to suffer the audio issue when the codec is not
	 * properly suspended.
	 */
	reset_method = amdgpu_asic_reset_method(adev);
	if ((reset_method != AMD_RESET_METHOD_BACO) &&
	    (reset_method != AMD_RESET_METHOD_MODE1))
		return -EINVAL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
			adev->pdev->bus->number, 1);
	if (!p)
		return -ENODEV;

	expires = pm_runtime_autosuspend_expiration(&(p->dev));
	if (!expires)
		/*
		 * If we cannot get the audio device autosuspend delay,
		 * a fixed 4s interval is used. Since 3s is the audio
		 * controller's default autosuspend delay setting, 4s
		 * is guaranteed to cover it.
		 */
		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;

	while (!pm_runtime_status_suspended(&(p->dev))) {
		if (!pm_runtime_suspend(&(p->dev)))
			break;

		if (expires < ktime_get_mono_fast_ns()) {
			dev_warn(adev->dev, "failed to suspend display audio\n");
			/* TODO: abort the succeeding gpu reset? */
			return -ETIMEDOUT;
		}
	}

	pm_runtime_disable(&(p->dev));

	return 0;
}
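
/*
 * Usage sketch (illustrative, matching amdgpu_device_gpu_recover() below):
 * a suspend that returns 0 is recorded so the codec is resumed once the
 * reset is done:
 *
 *	if (!amdgpu_device_suspend_display_audio(tmp_adev))
 *		audio_suspended = true;
 *	...
 *	if (audio_suspended)
 *		amdgpu_device_resume_display_audio(tmp_adev);
 */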

static void amdgpu_device_recheck_guilty_jobs(
	struct amdgpu_device *adev, struct list_head *device_list_handle,
	struct amdgpu_reset_context *reset_context)
{
	int i, r = 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		int ret = 0;
		struct drm_sched_job *s_job;

		if (!ring || !ring->sched.thread)
			continue;

		s_job = list_first_entry_or_null(&ring->sched.pending_list,
				struct drm_sched_job, list);
		if (s_job == NULL)
			continue;

		/* clear the job's guilty flag and rely on the following step to decide the real one */
		drm_sched_reset_karma(s_job);
		drm_sched_resubmit_jobs_ext(&ring->sched, 1);

		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
		if (ret == 0) { /* timeout */
			DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
				  ring->sched.name, s_job->id);

			/* set guilty */
			drm_sched_increase_karma(s_job);
retry:
			/* do hw reset */
			if (amdgpu_sriov_vf(adev)) {
				amdgpu_virt_fini_data_exchange(adev);
				r = amdgpu_device_reset_sriov(adev, false);
				if (r)
					adev->asic_reset_res = r;
			} else {
				clear_bit(AMDGPU_SKIP_HW_RESET,
					  &reset_context->flags);
				r = amdgpu_do_asic_reset(device_list_handle,
							 reset_context);
				if (r && r == -EAGAIN)
					goto retry;
			}

			/*
			 * add reset counter so that the following
			 * resubmitted job could flush vmid
			 */
			atomic_inc(&adev->gpu_reset_counter);
			continue;
		}

		/* got the hw fence, signal finished fence */
		atomic_dec(ring->sched.score);
		dma_fence_get(&s_job->s_fence->finished);
		dma_fence_signal(&s_job->s_fence->finished);
		dma_fence_put(&s_job->s_fence->finished);

		/* remove node from list and free the job */
		spin_lock(&ring->sched.job_list_lock);
		list_del_init(&s_job->list);
		spin_unlock(&ring->sched.job_list_lock);
		ring->sched.ops->free_job(s_job);
	}
}
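
/*
 * Note: this recheck only runs when gpu recovery is set to mode 2 (see the
 * amdgpu_gpu_recovery check in amdgpu_device_gpu_recover() below). It
 * probes one pending job per ring, so a full pass over the rings may
 * trigger several extra ASIC resets before the real guilty job is found.
 */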

/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu_device pointer
 * @job: which job triggered the hang
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempt to do a soft reset or full reset and reinitialize the ASIC.
 * Returns 0 for success or an error on failure.
 */

int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job)
{
	struct list_head device_list, *device_list_handle = NULL;
	bool job_signaled = false;
	struct amdgpu_hive_info *hive = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	int i, r = 0;
	bool need_emergency_restart = false;
	bool audio_suspended = false;
	int tmp_vram_lost_counter;
	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	/*
	 * Special case: RAS triggered and full reset isn't supported
	 */
	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);

	/*
	 * Flush RAM to disk so that after reboot
	 * the user can read log and see why the system rebooted.
	 */
	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
		DRM_WARN("Emergency reboot.");

		ksys_sync_helper();
		emergency_restart();
	}

	dev_info(adev->dev, "GPU %s begin!\n",
		need_emergency_restart ? "jobs stop" : "reset");

	/*
	 * Here we trylock to avoid a chain of resets executing, triggered
	 * either by jobs on different adevs in an XGMI hive or by jobs on
	 * different schedulers for the same device, while this TO handler
	 * is running. We always reset all schedulers for a device and all
	 * devices in an XGMI hive, so that should take care of them too.
	 */
	hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
			DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
				job ? job->base.id : -1, hive->hive_id);
			amdgpu_put_xgmi_hive(hive);
			if (job)
				drm_sched_increase_karma(&job->base);
			return 0;
		}
		mutex_lock(&hive->hive_lock);
	}

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	reset_context.job = job;
	reset_context.hive = hive;
	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	/*
	 * Lock the device before we try to operate on the linked list.
	 * If we didn't get the device lock, don't touch the linked list
	 * since others may be iterating over it.
	 */
	r = amdgpu_device_lock_hive_adev(adev, hive);
	if (r) {
		dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
			 job ? job->base.id : -1);

		/* even though we skipped this reset, we still need to mark the job guilty */
		if (job)
			drm_sched_increase_karma(&job->base);
		goto skip_recovery;
	}

	/*
	 * Build list of devices to reset.
	 * In case we are in XGMI hive mode, resort the device list
	 * to put adev in the 1st position.
	 */
	INIT_LIST_HEAD(&device_list);
	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
			list_add_tail(&tmp_adev->reset_list, &device_list);
		if (!list_is_first(&adev->reset_list, &device_list))
			list_rotate_to_front(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	} else {
		list_add_tail(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	}

	/* block all schedulers and reset given job's ring */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		/*
		 * Try to put the audio codec into suspend state
		 * before gpu reset started.
		 *
		 * The power domain of the graphics device is shared
		 * with the AZ power domain. Without this, we may
		 * change the audio hardware behind the audio driver's
		 * back, which triggers audio codec errors.
		 */
		if (!amdgpu_device_suspend_display_audio(tmp_adev))
			audio_suspended = true;

		amdgpu_ras_set_error_query_ready(tmp_adev, false);

		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);

		if (!amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_pre_reset(tmp_adev);

		/*
		 * Mark the ASICs to be reset as untracked first,
		 * and add them back after the reset completes.
		 */
		amdgpu_unregister_gpu_instance(tmp_adev);

		amdgpu_fbdev_set_suspend(tmp_adev, 1);

		/* disable ras on ALL IPs */
		if (!need_emergency_restart &&
		    amdgpu_device_ip_need_full_reset(tmp_adev))
			amdgpu_ras_suspend(tmp_adev);

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, job ? &job->base : NULL);

			if (need_emergency_restart)
				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
		}
		atomic_inc(&tmp_adev->gpu_reset_counter);
	}

	if (need_emergency_restart)
		goto skip_sched_resume;

	/*
	 * Must check guilty signal here since after this point all old
	 * HW fences are force signaled.
	 *
	 * job->base holds a reference to parent fence
	 */
	if (job && job->base.s_fence->parent &&
	    dma_fence_is_signaled(job->base.s_fence->parent)) {
		job_signaled = true;
		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
		goto skip_hw_reset;
	}

retry:	/* Rest of adevs pre asic reset from XGMI hive. */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
		/*TODO Should we stop ?*/
		if (r) {
			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
				r, adev_to_drm(tmp_adev)->unique);
			tmp_adev->asic_reset_res = r;
		}
	}

	tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
	/* Actual ASIC resets if needed. */
	/* TODO Implement XGMI hive reset logic for SRIOV */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_device_reset_sriov(adev, job ? false : true);
		if (r)
			adev->asic_reset_res = r;
	} else {
		r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
		if (r && r == -EAGAIN)
			goto retry;
	}

skip_hw_reset:

	/* Post ASIC reset for all devs. */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {

		/*
		 * Sometimes a later bad compute job can block a good gfx job
		 * because the gfx and compute rings share internal GC HW.
		 * Add an additional guilty-job recheck step to find the real
		 * guilty job: it synchronously resubmits and waits for the
		 * first pending job to signal. If that times out, we identify
		 * it as the real guilty job.
		 */
		if (amdgpu_gpu_recovery == 2 &&
		    !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
			amdgpu_device_recheck_guilty_jobs(
				tmp_adev, device_list_handle, &reset_context);

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* No point in resubmitting jobs if we didn't HW reset */
			if (!tmp_adev->asic_reset_res && !job_signaled)
				drm_sched_resubmit_jobs(&ring->sched);

			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
		}

		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
		}

		tmp_adev->asic_reset_res = 0;

		if (r) {
			/* bad news, how to tell it to userspace ? */
			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
		} else {
			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
		}
	}

skip_sched_resume:
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		/* unlock kfd: SRIOV would do it separately */
		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_post_reset(tmp_adev);

		/* kfd_post_reset will do nothing if the kfd device is not
		 * initialized, so bring up kfd here if it was not
		 * initialized before.
		 */
		if (!adev->kfd.init_complete)
			amdgpu_amdkfd_device_init(adev);

		if (audio_suspended)
			amdgpu_device_resume_display_audio(tmp_adev);
		amdgpu_device_unlock_adev(tmp_adev);
	}

skip_recovery:
	if (hive) {
		atomic_set(&hive->in_reset, 0);
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}

	if (r && r != -EAGAIN)
		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
	return r;
}
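
/*
 * Typical caller (sketch, assuming the usual amdgpu_job_timedout() handler
 * in amdgpu_job.c): the scheduler's timeout callback hands the hung job to
 * this function:
 *
 *	struct amdgpu_job *job = to_amdgpu_job(s_job);
 *	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
 *	...
 *	amdgpu_device_gpu_recover(ring->adev, job);
 */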

/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	struct pci_dev *pdev;
	enum pci_bus_speed speed_cap, platform_speed_cap;
	enum pcie_link_width platform_link_width;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
		return;

	pcie_bandwidth_available(adev->pdev, NULL,
				 &platform_speed_cap, &platform_link_width);

	if (adev->pm.pcie_gen_mask == 0) {
		/* asic caps */
		pdev = adev->pdev;
		speed_cap = pcie_get_speed_cap(pdev);
		if (speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
		} else {
			if (speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
		/* platform caps */
		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
		} else {
			if (platform_speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
		} else {
			switch (platform_link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
	}
}
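
/*
 * Note: both masks can be forced from userspace; the amdgpu_pcie_gen_cap
 * and amdgpu_pcie_lane_cap overrides checked above correspond to module
 * parameters (pcie_gen_cap / pcie_lane_cap in amdgpu_drv.c) that take
 * CAIL_* bitmask values, e.g. (illustrative):
 *
 *	modprobe amdgpu pcie_gen_cap=<CAIL gen mask> pcie_lane_cap=<CAIL width mask>
 */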

int amdgpu_device_baco_enter(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
		return -ENOTSUPP;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	return amdgpu_dpm_baco_enter(adev);
}

int amdgpu_device_baco_exit(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
		return -ENOTSUPP;

	ret = amdgpu_dpm_baco_exit(adev);
	if (ret)
		return ret;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}
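
/*
 * BACO ("Bus Active, Chip Off") sketch (illustrative): callers are expected
 * to pair the two entry points, e.g. around a runtime suspend cycle:
 *
 *	r = amdgpu_device_baco_enter(drm_dev);
 *	if (r)
 *		return r;
 *	...
 *	r = amdgpu_device_baco_exit(drm_dev);
 */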

static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		cancel_delayed_work_sync(&ring->sched.work_tdr);
	}
}

/**
 * amdgpu_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
 */
pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		DRM_WARN("No support for XGMI hive yet...");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	/* Fatal error, prepare for slot reset */
	case pci_channel_io_frozen:
		/*
		 * Cancel and wait for all TDRs in progress if failing to
		 * set adev->in_gpu_reset in amdgpu_device_lock_adev
		 *
		 * Locking adev->reset_sem will prevent any external access
		 * to GPU during PCI error recovery
		 */
		while (!amdgpu_device_lock_adev(adev, NULL))
			amdgpu_cancel_all_tdr(adev);

		/*
		 * Block any work scheduling as we do for regular GPU reset
		 * for the duration of the recovery
		 */
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, NULL);
		}
		atomic_inc(&adev->gpu_reset_counter);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}
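
/*
 * AER flow note: the PCI core calls these hooks in order: error_detected(),
 * then mmio_enabled() (for PCI_ERS_RESULT_CAN_RECOVER) or slot_reset() (for
 * PCI_ERS_RESULT_NEED_RESET), and finally resume() once recovery completes.
 * See Documentation/PCI/pci-error-recovery.rst.
 */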

/**
 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
 * @pdev: pointer to PCI device
 */
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{
	DRM_INFO("PCI error: mmio enabled callback!!\n");

	/* TODO - dump whatever for debugging purposes */

	/* This is called only if amdgpu_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, no need to reset slot.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r, i;
	struct amdgpu_reset_context reset_context;
	u32 memsize;
	struct list_head device_list;

	DRM_INFO("PCI error: slot reset callback!!\n");

	memset(&reset_context, 0, sizeof(reset_context));

	INIT_LIST_HEAD(&device_list);
	list_add_tail(&adev->reset_list, &device_list);

	/* wait for asic to come out of reset */
	msleep(500);

	/* Restore PCI confspace */
	amdgpu_device_load_pci_state(pdev);

	/* confirm ASIC came out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		memsize = amdgpu_asic_get_config_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
	if (memsize == 0xffffffff) {
		r = -ETIME;
		goto out;
	}

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);

	adev->in_pci_err_recovery = true;
	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
	adev->in_pci_err_recovery = false;
	if (r)
		goto out;

	r = amdgpu_do_asic_reset(&device_list, &reset_context);

out:
	if (!r) {
		if (amdgpu_device_cache_pci_state(adev->pdev))
			pci_restore_state(adev->pdev);

		DRM_INFO("PCIe error recovery succeeded\n");
	} else {
		DRM_ERROR("PCIe error recovery failed, err:%d", r);
		amdgpu_device_unlock_adev(adev);
	}

	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
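
/*
 * Lock lifetime note: adev was locked in amdgpu_pci_error_detected(); on a
 * successful slot reset it stays locked until amdgpu_pci_resume() below,
 * while the failure path above drops it immediately.
 */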

/**
 * amdgpu_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that its
 * OK to resume normal operation.
 */
void amdgpu_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: resume callback!!\n");

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		drm_sched_resubmit_jobs(&ring->sched);
		drm_sched_start(&ring->sched, true);
	}

	amdgpu_device_unlock_adev(adev);
}

bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pci_save_state(pdev);
	if (!r) {
		kfree(adev->pci_state);

		adev->pci_state = pci_store_saved_state(pdev);

		if (!adev->pci_state) {
			DRM_ERROR("Failed to store PCI saved state");
			return false;
		}
	} else {
		DRM_WARN("Failed to save PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (!adev->pci_state)
		return false;

	r = pci_load_saved_state(pdev, adev->pci_state);

	if (!r) {
		pci_restore_state(pdev);
	} else {
		DRM_WARN("Failed to load PCI state, err:%d\n", r);
		return false;
	}

	return true;
}
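
/*
 * Usage sketch (illustrative): the state cached here is what the PCI error
 * path above replays, pairing the two helpers across a slot reset:
 *
 *	amdgpu_device_cache_pci_state(adev->pdev);	(before the reset)
 *	...
 *	amdgpu_device_load_pci_state(pdev);		(in amdgpu_pci_slot_reset())
 */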