2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_probe_helper.h>
36 #include <drm/amdgpu_drm.h>
37 #include <linux/vgaarb.h>
38 #include <linux/vga_switcheroo.h>
39 #include <linux/efi.h>
41 #include "amdgpu_trace.h"
42 #include "amdgpu_i2c.h"
44 #include "amdgpu_atombios.h"
45 #include "amdgpu_atomfirmware.h"
47 #ifdef CONFIG_DRM_AMDGPU_SI
50 #ifdef CONFIG_DRM_AMDGPU_CIK
56 #include "bif/bif_4_1_d.h"
57 #include <linux/pci.h>
58 #include <linux/firmware.h>
59 #include "amdgpu_vf_error.h"
61 #include "amdgpu_amdkfd.h"
62 #include "amdgpu_pm.h"
64 #include "amdgpu_xgmi.h"
65 #include "amdgpu_ras.h"
66 #include "amdgpu_pmu.h"
67 #include "amdgpu_fru_eeprom.h"
68 #include "amdgpu_reset.h"
70 #include <linux/suspend.h>
71 #include <drm/task_barrier.h>
72 #include <linux/pm_runtime.h>
74 #include <drm/drm_drv.h>
76 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
77 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
87 MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");
89 #define AMDGPU_RESUME_MS 2000
91 const char *amdgpu_asic_name[] = {
131 * DOC: pcie_replay_count
133 * The amdgpu driver provides a sysfs API for reporting the total number
134 * of PCIe replays (NAKs).
135 * The file pcie_replay_count is used for this and returns the total
136 * number of replays as a sum of the NAKs generated and NAKs received.
139 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
140 struct device_attribute *attr, char *buf)
142 struct drm_device *ddev = dev_get_drvdata(dev);
143 struct amdgpu_device *adev = drm_to_adev(ddev);
144 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
146 return sysfs_emit(buf, "%llu\n", cnt);
149 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
150 amdgpu_device_get_pcie_replay_count, NULL);
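/*
 * Usage sketch (not part of the driver): from userspace the attribute is
 * typically read through the DRM card's PCI device directory, e.g.
 *
 *   cat /sys/class/drm/card0/device/pcie_replay_count
 *
 * The card index depends on the system.
 */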
152 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
157 * The amdgpu driver provides a sysfs API for reporting the product name
159 * The file product_name is used for this and returns the product name
160 * as returned from the FRU.
161 * NOTE: This is only available for certain server cards
164 static ssize_t amdgpu_device_get_product_name(struct device *dev,
165 struct device_attribute *attr, char *buf)
167 struct drm_device *ddev = dev_get_drvdata(dev);
168 struct amdgpu_device *adev = drm_to_adev(ddev);
170 return sysfs_emit(buf, "%s\n", adev->product_name);
173 static DEVICE_ATTR(product_name, S_IRUGO,
174 amdgpu_device_get_product_name, NULL);
177 * DOC: product_number
179 * The amdgpu driver provides a sysfs API for reporting the part number
181 * The file product_number is used for this and returns the part number
182 * as returned from the FRU.
183 * NOTE: This is only available for certain server cards
186 static ssize_t amdgpu_device_get_product_number(struct device *dev,
187 struct device_attribute *attr, char *buf)
189 struct drm_device *ddev = dev_get_drvdata(dev);
190 struct amdgpu_device *adev = drm_to_adev(ddev);
192 return sysfs_emit(buf, "%s\n", adev->product_number);
195 static DEVICE_ATTR(product_number, S_IRUGO,
196 amdgpu_device_get_product_number, NULL);
201 * The amdgpu driver provides a sysfs API for reporting the serial number
203 * The file serial_number is used for this and returns the serial number
204 * as returned from the FRU.
205 * NOTE: This is only available for certain server cards
208 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
209 struct device_attribute *attr, char *buf)
211 struct drm_device *ddev = dev_get_drvdata(dev);
212 struct amdgpu_device *adev = drm_to_adev(ddev);
214 return sysfs_emit(buf, "%s\n", adev->serial);
217 static DEVICE_ATTR(serial_number, S_IRUGO,
218 amdgpu_device_get_serial_number, NULL);
221 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
223 * @dev: drm_device pointer
225 * Returns true if the device is a dGPU with ATPX power control,
226 * otherwise returns false.
228 bool amdgpu_device_supports_px(struct drm_device *dev)
230 struct amdgpu_device *adev = drm_to_adev(dev);
232 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
238 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
240 * @dev: drm_device pointer
242 * Returns true if the device is a dGPU with ACPI power control,
243 * otherwise returns false.
245 bool amdgpu_device_supports_boco(struct drm_device *dev)
247 struct amdgpu_device *adev = drm_to_adev(dev);
250 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
256 * amdgpu_device_supports_baco - Does the device support BACO
258 * @dev: drm_device pointer
260 * Returns true if the device supports BACO,
261 * otherwise returns false.
263 bool amdgpu_device_supports_baco(struct drm_device *dev)
265 struct amdgpu_device *adev = drm_to_adev(dev);
267 return amdgpu_asic_supports_baco(adev);
271 * amdgpu_device_supports_smart_shift - Is the device dGPU with
272 * smart shift support
274 * @dev: drm_device pointer
276 * Returns true if the device is a dGPU with Smart Shift support,
277 * otherwise returns false.
279 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
281 return (amdgpu_device_supports_boco(dev) &&
282 amdgpu_acpi_is_power_shift_control_supported());
286 * VRAM access helper functions
290 * amdgpu_device_vram_access - read/write a buffer in vram
292 * @adev: amdgpu_device pointer
293 * @pos: offset of the buffer in vram
294 * @buf: virtual address of the buffer in system memory
295 * @size: read/write size in bytes; the buffer at @buf must be at least @size bytes
296 * @write: true - write to vram, otherwise - read from vram
298 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
299 uint32_t *buf, size_t size, bool write)
306 if (!drm_dev_enter(&adev->ddev, &idx))
310 last = min(pos + size, adev->gmc.visible_vram_size);
312 void __iomem *addr = adev->mman.aper_base_kaddr + pos;
313 size_t count = last - pos;
316 memcpy_toio(addr, buf, count);
318 amdgpu_device_flush_hdp(adev, NULL);
320 amdgpu_device_invalidate_hdp(adev, NULL);
322 memcpy_fromio(buf, addr, count);
334 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
335 for (last = pos + size; pos < last; pos += 4) {
336 uint32_t tmp = pos >> 31;
338 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
340 WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
344 WREG32_NO_KIQ(mmMM_DATA, *buf++);
346 *buf++ = RREG32_NO_KIQ(mmMM_DATA);
348 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
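/*
 * Illustrative sketch only (the offset and data are hypothetical): copying
 * a small buffer to VRAM with the helper above and reading it back.
 */
static void __maybe_unused amdgpu_device_vram_access_example(struct amdgpu_device *adev)
{
	uint32_t data[4] = { 0x12345678, 0x9abcdef0, 0, 0 };

	/* write 16 bytes at VRAM offset 0x1000 */
	amdgpu_device_vram_access(adev, 0x1000, data, sizeof(data), true);
	/* read the same 16 bytes back into the buffer */
	amdgpu_device_vram_access(adev, 0x1000, data, sizeof(data), false);
}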
357 * register access helper functions.
360 /* Check if hw access should be skipped because of hotplug or device error */
361 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
363 if (adev->no_hw_access)
366 #ifdef CONFIG_LOCKDEP
368 * This is a bit complicated to understand, so worth a comment. What we assert
369 * here is that the GPU reset is not running on another thread in parallel.
371 * For this we trylock the read side of the reset semaphore, if that succeeds
372 * we know that the reset is not running in parallel.
374 * If the trylock fails we assert that we are either already holding the read
375 * side of the lock or are the reset thread itself and hold the write side of
379 if (down_read_trylock(&adev->reset_sem))
380 up_read(&adev->reset_sem);
382 lockdep_assert_held(&adev->reset_sem);
389 * amdgpu_device_rreg - read a memory mapped IO or indirect register
391 * @adev: amdgpu_device pointer
392 * @reg: dword aligned register offset
393 * @acc_flags: access flags which require special behavior
395 * Returns the 32 bit value from the offset specified.
397 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
398 uint32_t reg, uint32_t acc_flags)
402 if (amdgpu_device_skip_hw_access(adev))
405 if ((reg * 4) < adev->rmmio_size) {
406 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
407 amdgpu_sriov_runtime(adev) &&
408 down_read_trylock(&adev->reset_sem)) {
409 ret = amdgpu_kiq_rreg(adev, reg);
410 up_read(&adev->reset_sem);
412 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
415 ret = adev->pcie_rreg(adev, reg * 4);
418 trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
424 * MMIO register read with byte offset helper function
425 * @offset: byte offset from MMIO start
430 * amdgpu_mm_rreg8 - read a memory mapped IO register
432 * @adev: amdgpu_device pointer
433 * @offset: byte aligned register offset
435 * Returns the 8 bit value from the offset specified.
437 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
439 if (amdgpu_device_skip_hw_access(adev))
442 if (offset < adev->rmmio_size)
443 return (readb(adev->rmmio + offset));
448 * MMIO register write with byte offset helper function
449 * @offset: byte offset from MMIO start
450 * @value: the value to be written to the register
454 * amdgpu_mm_wreg8 - write a memory mapped IO register
456 * @adev: amdgpu_device pointer
457 * @offset: byte aligned register offset
458 * @value: 8 bit value to write
460 * Writes the value specified to the offset specified.
462 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
464 if (amdgpu_device_skip_hw_access(adev))
467 if (offset < adev->rmmio_size)
468 writeb(value, adev->rmmio + offset);
474 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
476 * @adev: amdgpu_device pointer
477 * @reg: dword aligned register offset
478 * @v: 32 bit value to write to the register
479 * @acc_flags: access flags which require special behavior
481 * Writes the value specified to the offset specified.
483 void amdgpu_device_wreg(struct amdgpu_device *adev,
484 uint32_t reg, uint32_t v,
487 if (amdgpu_device_skip_hw_access(adev))
490 if ((reg * 4) < adev->rmmio_size) {
491 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
492 amdgpu_sriov_runtime(adev) &&
493 down_read_trylock(&adev->reset_sem)) {
494 amdgpu_kiq_wreg(adev, reg, v);
495 up_read(&adev->reset_sem);
497 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
500 adev->pcie_wreg(adev, reg * 4, v);
503 trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
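/*
 * Illustrative sketch only: a read-modify-write through the helpers above,
 * bypassing the KIQ path. The register offset (in dwords) is hypothetical.
 */
static void __maybe_unused amdgpu_device_reg_rmw_example(struct amdgpu_device *adev)
{
	uint32_t v = amdgpu_device_rreg(adev, 0x12, AMDGPU_REGS_NO_KIQ);

	amdgpu_device_wreg(adev, 0x12, v | 0x1, AMDGPU_REGS_NO_KIQ);
}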
507 * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
509 * This function is invoked only for debugfs register access.
511 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
512 uint32_t reg, uint32_t v)
514 if (amdgpu_device_skip_hw_access(adev))
517 if (amdgpu_sriov_fullaccess(adev) &&
518 adev->gfx.rlc.funcs &&
519 adev->gfx.rlc.funcs->is_rlcg_access_range) {
520 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
521 return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0, 0);
523 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
528 * amdgpu_mm_rdoorbell - read a doorbell dword
530 * @adev: amdgpu_device pointer
531 * @index: doorbell index
533 * Returns the value in the doorbell aperture at the
534 * requested doorbell index (CIK).
536 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
538 if (amdgpu_device_skip_hw_access(adev))
541 if (index < adev->doorbell.num_doorbells) {
542 return readl(adev->doorbell.ptr + index);
544 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
550 * amdgpu_mm_wdoorbell - write a doorbell dword
552 * @adev: amdgpu_device pointer
553 * @index: doorbell index
556 * Writes @v to the doorbell aperture at the
557 * requested doorbell index (CIK).
559 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
561 if (amdgpu_device_skip_hw_access(adev))
564 if (index < adev->doorbell.num_doorbells) {
565 writel(v, adev->doorbell.ptr + index);
567 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
572 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
574 * @adev: amdgpu_device pointer
575 * @index: doorbell index
577 * Returns the value in the doorbell aperture at the
578 * requested doorbell index (VEGA10+).
580 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
582 if (amdgpu_device_skip_hw_access(adev))
585 if (index < adev->doorbell.num_doorbells) {
586 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
588 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
594 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
596 * @adev: amdgpu_device pointer
597 * @index: doorbell index
600 * Writes @v to the doorbell aperture at the
601 * requested doorbell index (VEGA10+).
603 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
605 if (amdgpu_device_skip_hw_access(adev))
608 if (index < adev->doorbell.num_doorbells) {
609 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
611 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
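/*
 * Illustrative sketch only: bumping a 64 bit doorbell value. The index is
 * hypothetical; real callers derive it from adev->doorbell_index.
 */
static void __maybe_unused amdgpu_device_doorbell_example(struct amdgpu_device *adev)
{
	u32 index = 0x10;	/* hypothetical doorbell index */
	u64 wptr = amdgpu_mm_rdoorbell64(adev, index);

	amdgpu_mm_wdoorbell64(adev, index, wptr + 1);
}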
616 * amdgpu_device_indirect_rreg - read an indirect register
618 * @adev: amdgpu_device pointer
619 * @pcie_index: mmio register offset
620 * @pcie_data: mmio register offset
621 * @reg_addr: indirect register address to read from
623 * Returns the value of indirect register @reg_addr
625 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
626 u32 pcie_index, u32 pcie_data,
631 void __iomem *pcie_index_offset;
632 void __iomem *pcie_data_offset;
634 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
635 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
636 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
638 writel(reg_addr, pcie_index_offset);
639 readl(pcie_index_offset);
640 r = readl(pcie_data_offset);
641 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
647 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
649 * @adev: amdgpu_device pointer
650 * @pcie_index: mmio register offset
651 * @pcie_data: mmio register offset
652 * @reg_addr: indirect register address to read from
654 * Returns the value of indirect register @reg_addr
656 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
657 u32 pcie_index, u32 pcie_data,
662 void __iomem *pcie_index_offset;
663 void __iomem *pcie_data_offset;
665 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
666 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
667 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
669 /* read low 32 bits */
670 writel(reg_addr, pcie_index_offset);
671 readl(pcie_index_offset);
672 r = readl(pcie_data_offset);
673 /* read high 32 bits */
674 writel(reg_addr + 4, pcie_index_offset);
675 readl(pcie_index_offset);
676 r |= ((u64)readl(pcie_data_offset) << 32);
677 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
683 * amdgpu_device_indirect_wreg - write an indirect register
685 * @adev: amdgpu_device pointer
686 * @pcie_index: mmio register offset
687 * @pcie_data: mmio register offset
688 * @reg_addr: indirect register offset
689 * @reg_data: indirect register data
692 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
693 u32 pcie_index, u32 pcie_data,
694 u32 reg_addr, u32 reg_data)
697 void __iomem *pcie_index_offset;
698 void __iomem *pcie_data_offset;
700 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
701 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
702 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
704 writel(reg_addr, pcie_index_offset);
705 readl(pcie_index_offset);
706 writel(reg_data, pcie_data_offset);
707 readl(pcie_data_offset);
708 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
712 * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
714 * @adev: amdgpu_device pointer
715 * @pcie_index: mmio register offset
716 * @pcie_data: mmio register offset
717 * @reg_addr: indirect register offset
718 * @reg_data: indirect register data
721 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
722 u32 pcie_index, u32 pcie_data,
723 u32 reg_addr, u64 reg_data)
726 void __iomem *pcie_index_offset;
727 void __iomem *pcie_data_offset;
729 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
730 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
731 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
733 /* write low 32 bits */
734 writel(reg_addr, pcie_index_offset);
735 readl(pcie_index_offset);
736 writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
737 readl(pcie_data_offset);
738 /* write high 32 bits */
739 writel(reg_addr + 4, pcie_index_offset);
740 readl(pcie_index_offset);
741 writel((u32)(reg_data >> 32), pcie_data_offset);
742 readl(pcie_data_offset);
743 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
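/*
 * Illustrative sketch only: a 64 bit indirect read. The index/data register
 * offsets and the indirect address are hypothetical; real values come from
 * the ASIC's register headers.
 */
static u64 __maybe_unused amdgpu_device_indirect_example(struct amdgpu_device *adev)
{
	u32 pcie_index = 0x38;	/* hypothetical index register offset */
	u32 pcie_data = 0x3c;	/* hypothetical data register offset */

	return amdgpu_device_indirect_rreg64(adev, pcie_index, pcie_data, 0x1000);
}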
747 * amdgpu_invalid_rreg - dummy reg read function
749 * @adev: amdgpu_device pointer
750 * @reg: offset of register
752 * Dummy register read function. Used for register blocks
753 * that certain asics don't have (all asics).
754 * Returns the value in the register.
756 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
758 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
764 * amdgpu_invalid_wreg - dummy reg write function
766 * @adev: amdgpu_device pointer
767 * @reg: offset of register
768 * @v: value to write to the register
770 * Dummy register write function. Used for register blocks
771 * that certain asics don't have (all asics).
773 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
775 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
781 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
783 * @adev: amdgpu_device pointer
784 * @reg: offset of register
786 * Dummy register read function. Used for register blocks
787 * that certain asics don't have (all asics).
788 * Returns the value in the register.
790 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
792 DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
798 * amdgpu_invalid_wreg64 - dummy reg write function
800 * @adev: amdgpu_device pointer
801 * @reg: offset of register
802 * @v: value to write to the register
804 * Dummy register write function. Used for register blocks
805 * that certain asics don't have (all asics).
807 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
809 DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
815 * amdgpu_block_invalid_rreg - dummy reg read function
817 * @adev: amdgpu_device pointer
818 * @block: offset of instance
819 * @reg: offset of register
821 * Dummy register read function. Used for register blocks
822 * that certain asics don't have (all asics).
823 * Returns the value in the register.
825 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
826 uint32_t block, uint32_t reg)
828 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
835 * amdgpu_block_invalid_wreg - dummy reg write function
837 * @adev: amdgpu_device pointer
838 * @block: offset of instance
839 * @reg: offset of register
840 * @v: value to write to the register
842 * Dummy register write function. Used for register blocks
843 * that certain asics don't have (all asics).
845 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
847 uint32_t reg, uint32_t v)
849 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
855 * amdgpu_device_asic_init - Wrapper for atom asic_init
857 * @adev: amdgpu_device pointer
859 * Does any asic specific work and then calls atom asic init.
861 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
863 amdgpu_asic_pre_asic_init(adev);
865 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
869 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
871 * @adev: amdgpu_device pointer
873 * Allocates a scratch page of VRAM for use by various things in the
876 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
878 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
879 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
880 &adev->vram_scratch.robj,
881 &adev->vram_scratch.gpu_addr,
882 (void **)&adev->vram_scratch.ptr);
886 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
888 * @adev: amdgpu_device pointer
890 * Frees the VRAM scratch page.
892 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
894 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
898 * amdgpu_device_program_register_sequence - program an array of registers.
900 * @adev: amdgpu_device pointer
901 * @registers: pointer to the register array
902 * @array_size: size of the register array
904 * Programs an array of registers with AND and OR masks.
905 * This is a helper for setting golden registers.
907 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
908 const u32 *registers,
909 const u32 array_size)
911 u32 tmp, reg, and_mask, or_mask;
917 for (i = 0; i < array_size; i += 3) {
918 reg = registers[i + 0];
919 and_mask = registers[i + 1];
920 or_mask = registers[i + 2];
922 if (and_mask == 0xffffffff) {
927 if (adev->family >= AMDGPU_FAMILY_AI)
928 tmp |= (or_mask & and_mask);
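/*
 * Illustrative sketch only: the layout of a golden register list as
 * consumed above. Each entry is a {register, AND mask, OR mask} triple;
 * the offsets and masks here are hypothetical.
 */
static const u32 example_golden_settings[] __maybe_unused = {
	/* reg      and_mask    or_mask */
	0x0123, 0xffffffff, 0x00000001,	/* full overwrite (and_mask == ~0) */
	0x0456, 0x0000ff00, 0x00001200,	/* read-modify-write of byte 1 */
};
/* call: amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *					ARRAY_SIZE(example_golden_settings)); */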
937 * amdgpu_device_pci_config_reset - reset the GPU
939 * @adev: amdgpu_device pointer
941 * Resets the GPU using the pci config reset sequence.
942 * Only applicable to asics prior to vega10.
944 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
946 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
950 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
952 * @adev: amdgpu_device pointer
954 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
956 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
958 return pci_reset_function(adev->pdev);
962 * GPU doorbell aperture helpers function.
965 * amdgpu_device_doorbell_init - Init doorbell driver information.
967 * @adev: amdgpu_device pointer
969 * Init doorbell driver information (CIK)
970 * Returns 0 on success, error on failure.
972 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
975 /* No doorbell on SI hardware generation */
976 if (adev->asic_type < CHIP_BONAIRE) {
977 adev->doorbell.base = 0;
978 adev->doorbell.size = 0;
979 adev->doorbell.num_doorbells = 0;
980 adev->doorbell.ptr = NULL;
984 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
987 amdgpu_asic_init_doorbell_index(adev);
989 /* doorbell bar mapping */
990 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
991 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
993 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
994 adev->doorbell_index.max_assignment+1);
995 if (adev->doorbell.num_doorbells == 0)
998 /* For Vega, reserve and map two pages on doorbell BAR since SDMA
999 * paging queue doorbell use the second page. The
1000 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1001 * doorbells are in the first page. So with paging queue enabled,
1002 * the max num_doorbells should be extended by one page (0x400 in dwords).
1004 if (adev->asic_type >= CHIP_VEGA10)
1005 adev->doorbell.num_doorbells += 0x400;
1007 adev->doorbell.ptr = ioremap(adev->doorbell.base,
1008 adev->doorbell.num_doorbells *
1010 if (adev->doorbell.ptr == NULL)
1017 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1019 * @adev: amdgpu_device pointer
1021 * Tear down doorbell driver information (CIK)
1023 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1025 iounmap(adev->doorbell.ptr);
1026 adev->doorbell.ptr = NULL;
1032 * amdgpu_device_wb_*()
1033 * Writeback is the method by which the GPU updates special pages in memory
1034 * with the status of certain GPU events (fences, ring pointers, etc.).
1038 * amdgpu_device_wb_fini - Disable Writeback and free memory
1040 * @adev: amdgpu_device pointer
1042 * Disables Writeback and frees the Writeback memory (all asics).
1043 * Used at driver shutdown.
1045 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1047 if (adev->wb.wb_obj) {
1048 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1050 (void **)&adev->wb.wb);
1051 adev->wb.wb_obj = NULL;
1056 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
1058 * @adev: amdgpu_device pointer
1060 * Initializes writeback and allocates writeback memory (all asics).
1061 * Used at driver startup.
1062 * Returns 0 on success or a negative error code on failure.
1064 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1068 if (adev->wb.wb_obj == NULL) {
1069 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1070 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1071 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1072 &adev->wb.wb_obj, &adev->wb.gpu_addr,
1073 (void **)&adev->wb.wb);
1075 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1079 adev->wb.num_wb = AMDGPU_MAX_WB;
1080 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1082 /* clear wb memory */
1083 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1090 * amdgpu_device_wb_get - Allocate a wb entry
1092 * @adev: amdgpu_device pointer
1095 * Allocate a wb slot for use by the driver (all asics).
1096 * Returns 0 on success or -EINVAL on failure.
1098 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1100 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1102 if (offset < adev->wb.num_wb) {
1103 __set_bit(offset, adev->wb.used);
1104 *wb = offset << 3; /* convert to dw offset */
1112 * amdgpu_device_wb_free - Free a wb entry
1114 * @adev: amdgpu_device pointer
1117 * Free a wb slot allocated for use by the driver (all asics)
1119 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1122 if (wb < adev->wb.num_wb)
1123 __clear_bit(wb, adev->wb.used);
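/*
 * Illustrative sketch only: allocating a writeback slot, touching its CPU
 * view and freeing it again. The GPU address of the slot would be
 * adev->wb.gpu_addr + wb * 4.
 */
static void __maybe_unused amdgpu_device_wb_example(struct amdgpu_device *adev)
{
	u32 wb;

	if (amdgpu_device_wb_get(adev, &wb))
		return;

	adev->wb.wb[wb] = 0;	/* wb is a dword offset into the WB buffer */

	amdgpu_device_wb_free(adev, wb);
}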
1127 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1129 * @adev: amdgpu_device pointer
1131 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1132 * to fail, but if any of the BARs is not accessible after the resize we abort
1133 * driver loading by returning -ENODEV.
1135 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1137 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1138 struct pci_bus *root;
1139 struct resource *res;
1145 if (amdgpu_sriov_vf(adev))
1148 /* skip if the bios has already enabled large BAR */
1149 if (adev->gmc.real_vram_size &&
1150 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1153 /* Check if the root BUS has 64bit memory resources */
1154 root = adev->pdev->bus;
1155 while (root->parent)
1156 root = root->parent;
1158 pci_bus_for_each_resource(root, res, i) {
1159 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1160 res->start > 0x100000000ull)
1164 /* Trying to resize is pointless without a root hub window above 4GB */
1168 /* Limit the BAR size to what is available */
1169 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1172 /* Disable memory decoding while we change the BAR addresses and size */
1173 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1174 pci_write_config_word(adev->pdev, PCI_COMMAND,
1175 cmd & ~PCI_COMMAND_MEMORY);
1177 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1178 amdgpu_device_doorbell_fini(adev);
1179 if (adev->asic_type >= CHIP_BONAIRE)
1180 pci_release_resource(adev->pdev, 2);
1182 pci_release_resource(adev->pdev, 0);
1184 r = pci_resize_resource(adev->pdev, 0, rbar_size);
1186 DRM_INFO("Not enough PCI address space for a large BAR.");
1187 else if (r && r != -ENOTSUPP)
1188 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1190 pci_assign_unassigned_bus_resources(adev->pdev->bus);
1192 /* When the doorbell or fb BAR isn't available we have no chance of
1195 r = amdgpu_device_doorbell_init(adev);
1196 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1199 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1205 * GPU helpers function.
1208 * amdgpu_device_need_post - check if the hw need post or not
1210 * @adev: amdgpu_device pointer
1212 * Check if the asic has been initialized (all asics) at driver startup
1213 * or if post is needed after a hw reset is performed.
1214 * Returns true if post is needed, false if not.
1216 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1220 if (amdgpu_sriov_vf(adev))
1223 if (amdgpu_passthrough(adev)) {
1224 * for FIJI: in the whole GPU pass-through virtualization case, after a VM
1225 * reboot some old SMC firmware still needs the driver to do a vPost or the
1226 * GPU hangs. SMC firmware versions above 22.15 don't have this flaw, so we
1227 * force vPost for SMC versions below 22.15.
1229 if (adev->asic_type == CHIP_FIJI) {
1232 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1233 /* force vPost if an error occurred */
1237 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1238 if (fw_ver < 0x00160e00)
1243 /* Don't post if we need to reset whole hive on init */
1244 if (adev->gmc.xgmi.pending_reset)
1247 if (adev->has_hw_reset) {
1248 adev->has_hw_reset = false;
1252 /* bios scratch used on CIK+ */
1253 if (adev->asic_type >= CHIP_BONAIRE)
1254 return amdgpu_atombios_scratch_need_asic_init(adev);
1256 /* check MEM_SIZE for older asics */
1257 reg = amdgpu_asic_get_config_memsize(adev);
1259 if ((reg != 0) && (reg != 0xffffffff))
1265 /* if we get transitioned to only one device, take VGA back */
1267 * amdgpu_device_vga_set_decode - enable/disable vga decode
1269 * @cookie: amdgpu_device pointer
1270 * @state: enable/disable vga decode
1272 * Enable/disable vga decode (all asics).
1273 * Returns VGA resource flags.
1275 static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
1277 struct amdgpu_device *adev = cookie;
1278 amdgpu_asic_set_vga_state(adev, state);
1280 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1281 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1283 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1287 * amdgpu_device_check_block_size - validate the vm block size
1289 * @adev: amdgpu_device pointer
1291 * Validates the vm block size specified via module parameter.
1292 * The vm block size defines number of bits in page table versus page directory,
1293 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1294 * page table and the remaining bits are in the page directory.
1296 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1298 /* defines number of bits in page table versus page directory,
1299 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1300 * page table and the remaining bits are in the page directory */
1301 if (amdgpu_vm_block_size == -1)
1304 if (amdgpu_vm_block_size < 9) {
1305 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1306 amdgpu_vm_block_size);
1307 amdgpu_vm_block_size = -1;
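/*
 * Worked example: with 4KB pages there is a 12 bit in-page offset, so
 * amdgpu_vm_block_size = 9 gives 9 bits of page table index and each page
 * directory entry then covers 2^(12 + 9) = 2MB of address space.
 */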
1312 * amdgpu_device_check_vm_size - validate the vm size
1314 * @adev: amdgpu_device pointer
1316 * Validates the vm size in GB specified via module parameter.
1317 * The VM size is the size of the GPU virtual memory space in GB.
1319 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1321 /* no need to check the default value */
1322 if (amdgpu_vm_size == -1)
1325 if (amdgpu_vm_size < 1) {
1326 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1328 amdgpu_vm_size = -1;
1332 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1335 bool is_os_64 = (sizeof(void *) == 8);
1336 uint64_t total_memory;
1337 uint64_t dram_size_seven_GB = 0x1B8000000;
1338 uint64_t dram_size_three_GB = 0xB8000000;
1340 if (amdgpu_smu_memory_pool_size == 0)
1344 DRM_WARN("Not 64-bit OS, feature not supported\n");
1348 total_memory = (uint64_t)si.totalram * si.mem_unit;
1350 if ((amdgpu_smu_memory_pool_size == 1) ||
1351 (amdgpu_smu_memory_pool_size == 2)) {
1352 if (total_memory < dram_size_three_GB)
1354 } else if ((amdgpu_smu_memory_pool_size == 4) ||
1355 (amdgpu_smu_memory_pool_size == 8)) {
1356 if (total_memory < dram_size_seven_GB)
1359 DRM_WARN("Smu memory pool size not supported\n");
1362 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1367 DRM_WARN("Not enough system memory\n");
1369 adev->pm.smu_prv_buffer_size = 0;
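/*
 * Worked example: amdgpu_smu_memory_pool_size is given in 256MB units
 * (the << 28 above), so a value of 1 yields 256MB and a value of 8 yields
 * 2GB, subject to the total system memory checks above.
 */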
1372 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1374 if (!(adev->flags & AMD_IS_APU) ||
1375 adev->asic_type < CHIP_RAVEN)
1378 switch (adev->asic_type) {
1380 if (adev->pdev->device == 0x15dd)
1381 adev->apu_flags |= AMD_APU_IS_RAVEN;
1382 if (adev->pdev->device == 0x15d8)
1383 adev->apu_flags |= AMD_APU_IS_PICASSO;
1386 if ((adev->pdev->device == 0x1636) ||
1387 (adev->pdev->device == 0x164c))
1388 adev->apu_flags |= AMD_APU_IS_RENOIR;
1390 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1393 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1395 case CHIP_YELLOW_CARP:
1405 * amdgpu_device_check_arguments - validate module params
1407 * @adev: amdgpu_device pointer
1409 * Validates certain module parameters and updates
1410 * the associated values used by the driver (all asics).
1412 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1414 if (amdgpu_sched_jobs < 4) {
1415 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1417 amdgpu_sched_jobs = 4;
1418 } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1419 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1421 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1424 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1425 /* gart size must be greater or equal to 32M */
1426 dev_warn(adev->dev, "gart size (%d) too small\n",
1428 amdgpu_gart_size = -1;
1431 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1432 /* gtt size must be greater or equal to 32M */
1433 dev_warn(adev->dev, "gtt size (%d) too small\n",
1435 amdgpu_gtt_size = -1;
1438 /* valid range is between 4 and 9 inclusive */
1439 if (amdgpu_vm_fragment_size != -1 &&
1440 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1441 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1442 amdgpu_vm_fragment_size = -1;
1445 if (amdgpu_sched_hw_submission < 2) {
1446 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1447 amdgpu_sched_hw_submission);
1448 amdgpu_sched_hw_submission = 2;
1449 } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1450 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1451 amdgpu_sched_hw_submission);
1452 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1455 amdgpu_device_check_smu_prv_buffer_size(adev);
1457 amdgpu_device_check_vm_size(adev);
1459 amdgpu_device_check_block_size(adev);
1461 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1463 amdgpu_gmc_tmz_set(adev);
1465 amdgpu_gmc_noretry_set(adev);
1471 * amdgpu_switcheroo_set_state - set switcheroo state
1473 * @pdev: pci dev pointer
1474 * @state: vga_switcheroo state
1476 * Callback for the switcheroo driver. Suspends or resumes
1477 * the asic before or after it is powered up using ACPI methods.
1479 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1480 enum vga_switcheroo_state state)
1482 struct drm_device *dev = pci_get_drvdata(pdev);
1485 if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1488 if (state == VGA_SWITCHEROO_ON) {
1489 pr_info("switched on\n");
1490 /* don't suspend or resume card normally */
1491 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1493 pci_set_power_state(pdev, PCI_D0);
1494 amdgpu_device_load_pci_state(pdev);
1495 r = pci_enable_device(pdev);
1497 DRM_WARN("pci_enable_device failed (%d)\n", r);
1498 amdgpu_device_resume(dev, true);
1500 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1502 pr_info("switched off\n");
1503 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1504 amdgpu_device_suspend(dev, true);
1505 amdgpu_device_cache_pci_state(pdev);
1506 /* Shut down the device */
1507 pci_disable_device(pdev);
1508 pci_set_power_state(pdev, PCI_D3cold);
1509 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1514 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1516 * @pdev: pci dev pointer
1518 * Callback for the switcheroo driver. Checks if the switcheroo
1519 * state can be changed.
1520 * Returns true if the state can be changed, false if not.
1522 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1524 struct drm_device *dev = pci_get_drvdata(pdev);
1527 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1528 * locking inversion with the driver load path. And the access here is
1529 * completely racy anyway. So don't bother with locking for now.
1531 return atomic_read(&dev->open_count) == 0;
1534 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1535 .set_gpu_state = amdgpu_switcheroo_set_state,
1537 .can_switch = amdgpu_switcheroo_can_switch,
1541 * amdgpu_device_ip_set_clockgating_state - set the CG state
1543 * @dev: amdgpu_device pointer
1544 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1545 * @state: clockgating state (gate or ungate)
1547 * Sets the requested clockgating state for all instances of
1548 * the hardware IP specified.
1549 * Returns the error code from the last instance.
1551 int amdgpu_device_ip_set_clockgating_state(void *dev,
1552 enum amd_ip_block_type block_type,
1553 enum amd_clockgating_state state)
1555 struct amdgpu_device *adev = dev;
1558 for (i = 0; i < adev->num_ip_blocks; i++) {
1559 if (!adev->ip_blocks[i].status.valid)
1561 if (adev->ip_blocks[i].version->type != block_type)
1563 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1565 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1566 (void *)adev, state);
1568 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1569 adev->ip_blocks[i].version->funcs->name, r);
1575 * amdgpu_device_ip_set_powergating_state - set the PG state
1577 * @dev: amdgpu_device pointer
1578 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1579 * @state: powergating state (gate or ungate)
1581 * Sets the requested powergating state for all instances of
1582 * the hardware IP specified.
1583 * Returns the error code from the last instance.
1585 int amdgpu_device_ip_set_powergating_state(void *dev,
1586 enum amd_ip_block_type block_type,
1587 enum amd_powergating_state state)
1589 struct amdgpu_device *adev = dev;
1592 for (i = 0; i < adev->num_ip_blocks; i++) {
1593 if (!adev->ip_blocks[i].status.valid)
1595 if (adev->ip_blocks[i].version->type != block_type)
1597 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1599 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1600 (void *)adev, state);
1602 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1603 adev->ip_blocks[i].version->funcs->name, r);
1609 * amdgpu_device_ip_get_clockgating_state - get the CG state
1611 * @adev: amdgpu_device pointer
1612 * @flags: clockgating feature flags
1614 * Walks the list of IPs on the device and updates the clockgating
1615 * flags for each IP.
1616 * Updates @flags with the feature flags for each hardware IP where
1617 * clockgating is enabled.
1619 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1624 for (i = 0; i < adev->num_ip_blocks; i++) {
1625 if (!adev->ip_blocks[i].status.valid)
1627 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1628 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1633 * amdgpu_device_ip_wait_for_idle - wait for idle
1635 * @adev: amdgpu_device pointer
1636 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1638 * Waits for the requested hardware IP to be idle.
1639 * Returns 0 for success or a negative error code on failure.
1641 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1642 enum amd_ip_block_type block_type)
1646 for (i = 0; i < adev->num_ip_blocks; i++) {
1647 if (!adev->ip_blocks[i].status.valid)
1649 if (adev->ip_blocks[i].version->type == block_type) {
1650 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1661 * amdgpu_device_ip_is_idle - is the hardware IP idle
1663 * @adev: amdgpu_device pointer
1664 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1666 * Check if the hardware IP is idle or not.
1667 * Returns true if the IP is idle, false if not.
1669 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1670 enum amd_ip_block_type block_type)
1674 for (i = 0; i < adev->num_ip_blocks; i++) {
1675 if (!adev->ip_blocks[i].status.valid)
1677 if (adev->ip_blocks[i].version->type == block_type)
1678 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1685 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1687 * @adev: amdgpu_device pointer
1688 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1690 * Returns a pointer to the hardware IP block structure
1691 * if it exists for the asic, otherwise NULL.
1693 struct amdgpu_ip_block *
1694 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1695 enum amd_ip_block_type type)
1699 for (i = 0; i < adev->num_ip_blocks; i++)
1700 if (adev->ip_blocks[i].version->type == type)
1701 return &adev->ip_blocks[i];
1707 * amdgpu_device_ip_block_version_cmp
1709 * @adev: amdgpu_device pointer
1710 * @type: enum amd_ip_block_type
1711 * @major: major version
1712 * @minor: minor version
1714 * Returns 0 if the version is equal or greater,
1715 * 1 if smaller or the ip_block doesn't exist.
1717 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1718 enum amd_ip_block_type type,
1719 u32 major, u32 minor)
1721 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1723 if (ip_block && ((ip_block->version->major > major) ||
1724 ((ip_block->version->major == major) &&
1725 (ip_block->version->minor >= minor))))
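/*
 * Illustrative sketch only: checking that the GFX IP on this asic is at
 * least version 9.0. The block type and version are arbitrary examples.
 */
static bool __maybe_unused amdgpu_device_gfx_at_least_v9(struct amdgpu_device *adev)
{
	return amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
						  9, 0) == 0;
}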
1732 * amdgpu_device_ip_block_add
1734 * @adev: amdgpu_device pointer
1735 * @ip_block_version: pointer to the IP to add
1737 * Adds the IP block driver information to the collection of IPs
1740 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1741 const struct amdgpu_ip_block_version *ip_block_version)
1743 if (!ip_block_version)
1746 switch (ip_block_version->type) {
1747 case AMD_IP_BLOCK_TYPE_VCN:
1748 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1751 case AMD_IP_BLOCK_TYPE_JPEG:
1752 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1759 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1760 ip_block_version->funcs->name);
1762 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1768 * amdgpu_device_enable_virtual_display - enable virtual display feature
1770 * @adev: amdgpu_device pointer
1772 * Enables the virtual display feature if the user has enabled it via
1773 * the module parameter virtual_display. This feature provides a virtual
1774 * display hardware on headless boards or in virtualized environments.
1775 * This function parses and validates the configuration string specified by
1776 * the user and configures the virtual display configuration (number of
1777 * virtual connectors, crtcs, etc.) specified.
1779 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1781 adev->enable_virtual_display = false;
1783 if (amdgpu_virtual_display) {
1784 const char *pci_address_name = pci_name(adev->pdev);
1785 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1787 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1788 pciaddstr_tmp = pciaddstr;
1789 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1790 pciaddname = strsep(&pciaddname_tmp, ",");
1791 if (!strcmp("all", pciaddname)
1792 || !strcmp(pci_address_name, pciaddname)) {
1796 adev->enable_virtual_display = true;
1799 res = kstrtol(pciaddname_tmp, 10,
1807 adev->mode_info.num_crtc = num_crtc;
1809 adev->mode_info.num_crtc = 1;
1815 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1816 amdgpu_virtual_display, pci_address_name,
1817 adev->enable_virtual_display, adev->mode_info.num_crtc);
1824 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1826 * @adev: amdgpu_device pointer
1828 * Parses the asic configuration parameters specified in the gpu info
1829 * firmware and makes them available to the driver for use in configuring
1831 * Returns 0 on success, -EINVAL on failure.
1833 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1835 const char *chip_name;
1838 const struct gpu_info_firmware_header_v1_0 *hdr;
1840 adev->firmware.gpu_info_fw = NULL;
1842 if (adev->mman.discovery_bin) {
1843 amdgpu_discovery_get_gfx_info(adev);
1846 * FIXME: The bounding box is still needed by Navi12, so
1847 * temporarily read it from gpu_info firmware. Should be dropped
1848 * when DAL no longer needs it.
1850 if (adev->asic_type != CHIP_NAVI12)
1854 switch (adev->asic_type) {
1855 #ifdef CONFIG_DRM_AMDGPU_SI
1862 #ifdef CONFIG_DRM_AMDGPU_CIK
1872 case CHIP_POLARIS10:
1873 case CHIP_POLARIS11:
1874 case CHIP_POLARIS12:
1879 case CHIP_ALDEBARAN:
1880 case CHIP_SIENNA_CICHLID:
1881 case CHIP_NAVY_FLOUNDER:
1882 case CHIP_DIMGREY_CAVEFISH:
1883 case CHIP_BEIGE_GOBY:
1887 chip_name = "vega10";
1890 chip_name = "vega12";
1893 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1894 chip_name = "raven2";
1895 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1896 chip_name = "picasso";
1898 chip_name = "raven";
1901 chip_name = "arcturus";
1904 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1905 chip_name = "renoir";
1907 chip_name = "green_sardine";
1910 chip_name = "navi10";
1913 chip_name = "navi14";
1916 chip_name = "navi12";
1919 chip_name = "vangogh";
1921 case CHIP_YELLOW_CARP:
1922 chip_name = "yellow_carp";
1926 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1927 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1930 "Failed to load gpu_info firmware \"%s\"\n",
1934 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1937 "Failed to validate gpu_info firmware \"%s\"\n",
1942 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1943 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1945 switch (hdr->version_major) {
1948 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1949 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1950 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1953 * Should be dropped when DAL no longer needs it.
1955 if (adev->asic_type == CHIP_NAVI12)
1956 goto parse_soc_bounding_box;
1958 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1959 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1960 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1961 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1962 adev->gfx.config.max_texture_channel_caches =
1963 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1964 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1965 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1966 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1967 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1968 adev->gfx.config.double_offchip_lds_buf =
1969 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1970 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1971 adev->gfx.cu_info.max_waves_per_simd =
1972 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1973 adev->gfx.cu_info.max_scratch_slots_per_cu =
1974 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1975 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1976 if (hdr->version_minor >= 1) {
1977 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1978 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1979 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1980 adev->gfx.config.num_sc_per_sh =
1981 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1982 adev->gfx.config.num_packer_per_sc =
1983 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1986 parse_soc_bounding_box:
1988 * soc bounding box info is not integrated in discovery table,
1989 * we always need to parse it from gpu info firmware if needed.
1991 if (hdr->version_minor == 2) {
1992 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1993 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1994 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1995 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2001 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2010 * amdgpu_device_ip_early_init - run early init for hardware IPs
2012 * @adev: amdgpu_device pointer
2014 * Early initialization pass for hardware IPs. The hardware IPs that make
2015 * up each asic are discovered and each IP's early_init callback is run. This
2016 * is the first stage in initializing the asic.
2017 * Returns 0 on success, negative error code on failure.
2019 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2023 amdgpu_device_enable_virtual_display(adev);
2025 if (amdgpu_sriov_vf(adev)) {
2026 r = amdgpu_virt_request_full_gpu(adev, true);
2031 switch (adev->asic_type) {
2032 #ifdef CONFIG_DRM_AMDGPU_SI
2038 adev->family = AMDGPU_FAMILY_SI;
2039 r = si_set_ip_blocks(adev);
2044 #ifdef CONFIG_DRM_AMDGPU_CIK
2050 if (adev->flags & AMD_IS_APU)
2051 adev->family = AMDGPU_FAMILY_KV;
2053 adev->family = AMDGPU_FAMILY_CI;
2055 r = cik_set_ip_blocks(adev);
2063 case CHIP_POLARIS10:
2064 case CHIP_POLARIS11:
2065 case CHIP_POLARIS12:
2069 if (adev->flags & AMD_IS_APU)
2070 adev->family = AMDGPU_FAMILY_CZ;
2072 adev->family = AMDGPU_FAMILY_VI;
2074 r = vi_set_ip_blocks(adev);
2084 case CHIP_ALDEBARAN:
2085 if (adev->flags & AMD_IS_APU)
2086 adev->family = AMDGPU_FAMILY_RV;
2088 adev->family = AMDGPU_FAMILY_AI;
2090 r = soc15_set_ip_blocks(adev);
2097 case CHIP_SIENNA_CICHLID:
2098 case CHIP_NAVY_FLOUNDER:
2099 case CHIP_DIMGREY_CAVEFISH:
2100 case CHIP_BEIGE_GOBY:
2102 case CHIP_YELLOW_CARP:
2103 if (adev->asic_type == CHIP_VANGOGH)
2104 adev->family = AMDGPU_FAMILY_VGH;
2105 else if (adev->asic_type == CHIP_YELLOW_CARP)
2106 adev->family = AMDGPU_FAMILY_YC;
2108 adev->family = AMDGPU_FAMILY_NV;
2110 r = nv_set_ip_blocks(adev);
2115 /* FIXME: not supported yet */
2119 amdgpu_amdkfd_device_probe(adev);
2121 adev->pm.pp_feature = amdgpu_pp_feature_mask;
2122 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2123 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2124 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2125 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2127 for (i = 0; i < adev->num_ip_blocks; i++) {
2128 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2129 DRM_ERROR("disabled ip block: %d <%s>\n",
2130 i, adev->ip_blocks[i].version->funcs->name);
2131 adev->ip_blocks[i].status.valid = false;
2133 if (adev->ip_blocks[i].version->funcs->early_init) {
2134 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2136 adev->ip_blocks[i].status.valid = false;
2138 DRM_ERROR("early_init of IP block <%s> failed %d\n",
2139 adev->ip_blocks[i].version->funcs->name, r);
2142 adev->ip_blocks[i].status.valid = true;
2145 adev->ip_blocks[i].status.valid = true;
2148 /* get the vbios after the asic_funcs are set up */
2149 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2150 r = amdgpu_device_parse_gpu_info_fw(adev);
2155 if (!amdgpu_get_bios(adev))
2158 r = amdgpu_atombios_init(adev);
2160 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2161 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2165 /* get pf2vf msg info at its earliest time */
2166 if (amdgpu_sriov_vf(adev))
2167 amdgpu_virt_init_data_exchange(adev);
2172 adev->cg_flags &= amdgpu_cg_mask;
2173 adev->pg_flags &= amdgpu_pg_mask;
2178 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2182 for (i = 0; i < adev->num_ip_blocks; i++) {
2183 if (!adev->ip_blocks[i].status.sw)
2185 if (adev->ip_blocks[i].status.hw)
2187 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2188 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2189 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2190 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2192 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2193 adev->ip_blocks[i].version->funcs->name, r);
2196 adev->ip_blocks[i].status.hw = true;
2203 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2207 for (i = 0; i < adev->num_ip_blocks; i++) {
2208 if (!adev->ip_blocks[i].status.sw)
2210 if (adev->ip_blocks[i].status.hw)
2212 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2214 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2215 adev->ip_blocks[i].version->funcs->name, r);
2218 adev->ip_blocks[i].status.hw = true;
2224 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2228 uint32_t smu_version;
2230 if (adev->asic_type >= CHIP_VEGA10) {
2231 for (i = 0; i < adev->num_ip_blocks; i++) {
2232 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2235 if (!adev->ip_blocks[i].status.sw)
2238 /* no need to do the fw loading again if already done */
2239 if (adev->ip_blocks[i].status.hw)
2242 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2243 r = adev->ip_blocks[i].version->funcs->resume(adev);
2245 DRM_ERROR("resume of IP block <%s> failed %d\n",
2246 adev->ip_blocks[i].version->funcs->name, r);
2250 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2252 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2253 adev->ip_blocks[i].version->funcs->name, r);
2258 adev->ip_blocks[i].status.hw = true;
2263 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2264 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2270 * amdgpu_device_ip_init - run init for hardware IPs
2272 * @adev: amdgpu_device pointer
2274 * Main initialization pass for hardware IPs. The list of all the hardware
2275 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2276 * are run. sw_init initializes the software state associated with each IP
2277 * and hw_init initializes the hardware associated with each IP.
2278 * Returns 0 on success, negative error code on failure.
2280 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2284 r = amdgpu_ras_init(adev);
2288 for (i = 0; i < adev->num_ip_blocks; i++) {
2289 if (!adev->ip_blocks[i].status.valid)
2291 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2293 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2294 adev->ip_blocks[i].version->funcs->name, r);
2297 adev->ip_blocks[i].status.sw = true;
2299 /* need to do gmc hw init early so we can allocate gpu mem */
2300 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2301 r = amdgpu_device_vram_scratch_init(adev);
2303 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2306 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2308 DRM_ERROR("hw_init %d failed %d\n", i, r);
2311 r = amdgpu_device_wb_init(adev);
2313 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2316 adev->ip_blocks[i].status.hw = true;
2318 /* right after GMC hw init, we create CSA */
2319 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2320 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2321 AMDGPU_GEM_DOMAIN_VRAM,
2324 DRM_ERROR("allocate CSA failed %d\n", r);
2331 if (amdgpu_sriov_vf(adev))
2332 amdgpu_virt_init_data_exchange(adev);
2334 r = amdgpu_ib_pool_init(adev);
2336 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2337 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2341 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2345 r = amdgpu_device_ip_hw_init_phase1(adev);
2349 r = amdgpu_device_fw_loading(adev);
2353 r = amdgpu_device_ip_hw_init_phase2(adev);
2358 * retired pages will be loaded from eeprom and reserved here;
2359 * this should be called after amdgpu_device_ip_hw_init_phase2, since
2360 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2361 * functional for I2C communication, which is only true at this point.
2363 * amdgpu_ras_recovery_init may fail, but the upper layer only cares
2364 * about failures caused by a bad gpu state, which stop the amdgpu
2365 * init process accordingly. For other failures, it still releases all
2366 * the resources and prints an error message, rather than returning a
2367 * negative value to the upper level.
2369 * Note: theoretically, this should be called before all vram allocations
2370 * to protect retired pages from being reused.
2372 r = amdgpu_ras_recovery_init(adev);
2376 if (adev->gmc.xgmi.num_physical_nodes > 1)
2377 amdgpu_xgmi_add_device(adev);
2379 /* Don't init kfd if the whole hive needs to be reset during init */
2380 if (!adev->gmc.xgmi.pending_reset)
2381 amdgpu_amdkfd_device_init(adev);
2383 amdgpu_fru_get_product_info(adev);
2386 if (amdgpu_sriov_vf(adev))
2387 amdgpu_virt_release_full_gpu(adev, true);
2393 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2395 * @adev: amdgpu_device pointer
2397 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2398 * this function before a GPU reset. If the value is retained after a
2399 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2401 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2403 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2407 * amdgpu_device_check_vram_lost - check if vram is valid
2409 * @adev: amdgpu_device pointer
2411 * Checks the reset magic value written to the gart pointer in VRAM.
2412 * The driver calls this after a GPU reset to see if the contents of
2413 * VRAM have been lost or not.
2414 * Returns true if vram is lost, false if not.
2416 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2418 if (memcmp(adev->gart.ptr, adev->reset_magic,
2419 AMDGPU_RESET_MAGIC_NUM))
2422 if (!amdgpu_in_reset(adev))
2426 * For all ASICs with baco/mode1 reset, the VRAM is
2427 * always assumed to be lost.
2429 switch (amdgpu_asic_reset_method(adev)) {
2430 case AMD_RESET_METHOD_BACO:
2431 case AMD_RESET_METHOD_MODE1:
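/*
 * Illustrative sketch only (the helper name below is hypothetical and not
 * part of the driver): how the reset-magic pair above is intended to be
 * used around an ASIC reset. amdgpu_device_fill_reset_magic() stashes the
 * magic before the reset; amdgpu_device_check_vram_lost() compares it
 * afterwards to decide whether VRAM contents survived.
 */
static int __maybe_unused example_reset_with_vram_check(struct amdgpu_device *adev)
{
	int r;

	/* stash the magic in VRAM (via the gart mapping) before resetting */
	amdgpu_device_fill_reset_magic(adev);

	r = amdgpu_asic_reset(adev);
	if (r)
		return r;

	/* if the magic did not survive, VRAM contents must be restored */
	if (amdgpu_device_check_vram_lost(adev))
		DRM_INFO("VRAM is lost due to GPU reset!\n");

	return 0;
}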
2439 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2441 * @adev: amdgpu_device pointer
2442 * @state: clockgating state (gate or ungate)
2444 * The list of all the hardware IPs that make up the asic is walked and the
2445 * set_clockgating_state callbacks are run.
2446 * The late initialization pass enables clockgating for hardware IPs.
2447 * The fini or suspend pass disables clockgating for hardware IPs.
2448 * Returns 0 on success, negative error code on failure.
2451 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2452 enum amd_clockgating_state state)
2456 if (amdgpu_emu_mode == 1)
2459 for (j = 0; j < adev->num_ip_blocks; j++) {
2460 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2461 if (!adev->ip_blocks[i].status.late_initialized)
2463 /* skip CG for GFX on S0ix */
2464 if (adev->in_s0ix &&
2465 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2467 /* skip CG for VCE/UVD, it's handled specially */
2468 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2469 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2470 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2471 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2472 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2473 /* enable clockgating to save power */
2474 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2477 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2478 adev->ip_blocks[i].version->funcs->name, r);
2487 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2488 enum amd_powergating_state state)
2492 if (amdgpu_emu_mode == 1)
2495 for (j = 0; j < adev->num_ip_blocks; j++) {
2496 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2497 if (!adev->ip_blocks[i].status.late_initialized)
2499 /* skip PG for GFX on S0ix */
2500 if (adev->in_s0ix &&
2501 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2503 /* skip PG for VCE/UVD, it's handled specially */
2504 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2505 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2506 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2507 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2508 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2509 /* enable powergating to save power */
2510 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2513 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2514 adev->ip_blocks[i].version->funcs->name, r);
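/*
 * Minimal sketch (hypothetical helper, not called anywhere): the two
 * helpers above are used in pairs. Gating is applied during late init and
 * undone, in reverse IP order, on fini/suspend, as the driver itself does
 * in amdgpu_device_ip_late_init() and amdgpu_device_ip_fini_early().
 */
static void __maybe_unused example_toggle_gating(struct amdgpu_device *adev)
{
	/* late init: enable clock- and powergating to save power */
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);

	/* fini/suspend: ungate power first, then clocks */
	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
}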
2522 static int amdgpu_device_enable_mgpu_fan_boost(void)
2524 struct amdgpu_gpu_instance *gpu_ins;
2525 struct amdgpu_device *adev;
2528 mutex_lock(&mgpu_info.mutex);
2531 * MGPU fan boost feature should be enabled
2532 * only when there are two or more dGPUs in the system.
2535 if (mgpu_info.num_dgpu < 2)
2538 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2539 gpu_ins = &(mgpu_info.gpu_ins[i]);
2540 adev = gpu_ins->adev;
2541 if (!(adev->flags & AMD_IS_APU) &&
2542 !gpu_ins->mgpu_fan_enabled) {
2543 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2547 gpu_ins->mgpu_fan_enabled = 1;
2552 mutex_unlock(&mgpu_info.mutex);
2558 * amdgpu_device_ip_late_init - run late init for hardware IPs
2560 * @adev: amdgpu_device pointer
2562 * Late initialization pass for hardware IPs. The list of all the hardware
2563 * IPs that make up the asic is walked and the late_init callbacks are run.
2564 * late_init covers any special initialization that an IP requires
2565 * after all of the IPs have been initialized or something that needs to happen
2566 * late in the init process.
2567 * Returns 0 on success, negative error code on failure.
2569 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2571 struct amdgpu_gpu_instance *gpu_instance;
2574 for (i = 0; i < adev->num_ip_blocks; i++) {
2575 if (!adev->ip_blocks[i].status.hw)
2577 if (adev->ip_blocks[i].version->funcs->late_init) {
2578 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2580 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2581 adev->ip_blocks[i].version->funcs->name, r);
2585 adev->ip_blocks[i].status.late_initialized = true;
2588 amdgpu_ras_set_error_query_ready(adev, true);
2590 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2591 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2593 amdgpu_device_fill_reset_magic(adev);
2595 r = amdgpu_device_enable_mgpu_fan_boost();
2597 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2599 /* For XGMI + passthrough configuration on arcturus, enable light SBR */
2600 if (adev->asic_type == CHIP_ARCTURUS &&
2601 amdgpu_passthrough(adev) &&
2602 adev->gmc.xgmi.num_physical_nodes > 1)
2603 smu_set_light_sbr(&adev->smu, true);
2605 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2606 mutex_lock(&mgpu_info.mutex);
2609 * Reset device p-state to low as this was booted with high.
2611 * This should be performed only after all devices from the same
2612 * hive get initialized.
2614 * However, the number of devices in a hive is not known in advance;
2615 * it is counted one by one as the devices initialize.
2617 * So we wait until all XGMI interlinked devices are initialized.
2618 * This may bring some delays as those devices may come from
2619 * different hives. But that should be OK.
2621 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2622 for (i = 0; i < mgpu_info.num_gpu; i++) {
2623 gpu_instance = &(mgpu_info.gpu_ins[i]);
2624 if (gpu_instance->adev->flags & AMD_IS_APU)
2627 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2628 AMDGPU_XGMI_PSTATE_MIN);
2630 DRM_ERROR("pstate setting failed (%d).\n", r);
2636 mutex_unlock(&mgpu_info.mutex);
2642 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2646 for (i = 0; i < adev->num_ip_blocks; i++) {
2647 if (!adev->ip_blocks[i].version->funcs->early_fini)
2650 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2652 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2653 adev->ip_blocks[i].version->funcs->name, r);
2657 amdgpu_amdkfd_suspend(adev, false);
2659 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2660 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2662 /* need to disable SMC first */
2663 for (i = 0; i < adev->num_ip_blocks; i++) {
2664 if (!adev->ip_blocks[i].status.hw)
2666 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2667 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2668 /* XXX handle errors */
2670 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2671 adev->ip_blocks[i].version->funcs->name, r);
2673 adev->ip_blocks[i].status.hw = false;
2678 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2679 if (!adev->ip_blocks[i].status.hw)
2682 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2683 /* XXX handle errors */
2685 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2686 adev->ip_blocks[i].version->funcs->name, r);
2689 adev->ip_blocks[i].status.hw = false;
2696 * amdgpu_device_ip_fini - run fini for hardware IPs
2698 * @adev: amdgpu_device pointer
2700 * Main teardown pass for hardware IPs. The list of all the hardware
2701 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2702 * are run. hw_fini tears down the hardware associated with each IP
2703 * and sw_fini tears down any software state associated with each IP.
2704 * Returns 0 on success, negative error code on failure.
2706 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2710 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2711 amdgpu_virt_release_ras_err_handler_data(adev);
2713 amdgpu_ras_pre_fini(adev);
2715 if (adev->gmc.xgmi.num_physical_nodes > 1)
2716 amdgpu_xgmi_remove_device(adev);
2718 amdgpu_amdkfd_device_fini_sw(adev);
2720 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2721 if (!adev->ip_blocks[i].status.sw)
2724 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2725 amdgpu_ucode_free_bo(adev);
2726 amdgpu_free_static_csa(&adev->virt.csa_obj);
2727 amdgpu_device_wb_fini(adev);
2728 amdgpu_device_vram_scratch_fini(adev);
2729 amdgpu_ib_pool_fini(adev);
2732 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2733 /* XXX handle errors */
2735 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2736 adev->ip_blocks[i].version->funcs->name, r);
2738 adev->ip_blocks[i].status.sw = false;
2739 adev->ip_blocks[i].status.valid = false;
2742 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2743 if (!adev->ip_blocks[i].status.late_initialized)
2745 if (adev->ip_blocks[i].version->funcs->late_fini)
2746 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2747 adev->ip_blocks[i].status.late_initialized = false;
2750 amdgpu_ras_fini(adev);
2752 if (amdgpu_sriov_vf(adev))
2753 if (amdgpu_virt_release_full_gpu(adev, false))
2754 DRM_ERROR("failed to release exclusive mode on fini\n");
2760 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2762 * @work: work_struct.
2764 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2766 struct amdgpu_device *adev =
2767 container_of(work, struct amdgpu_device, delayed_init_work.work);
2770 r = amdgpu_ib_ring_tests(adev);
2772 DRM_ERROR("ib ring test failed (%d).\n", r);
2775 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2777 struct amdgpu_device *adev =
2778 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2780 WARN_ON_ONCE(adev->gfx.gfx_off_state);
2781 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2783 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2784 adev->gfx.gfx_off_state = true;
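/*
 * Illustrative sketch (hypothetical caller): GFXOFF is reference counted.
 * amdgpu_gfx_off_ctrl() (implemented in amdgpu_gfx.c) raises or drops
 * gfx_off_req_count, and the delayed work above only re-enables GFXOFF
 * once the count has stayed at zero for the length of the delay.
 */
static void __maybe_unused example_gfxoff_critical_section(struct amdgpu_device *adev)
{
	amdgpu_gfx_off_ctrl(adev, false);	/* keep GFX powered on */

	/* ... safely access GFX while GFXOFF is disallowed ... */

	amdgpu_gfx_off_ctrl(adev, true);	/* allow GFXOFF again */
}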
2788 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2790 * @adev: amdgpu_device pointer
2792 * Main suspend function for hardware IPs. The list of all the hardware
2793 * IPs that make up the asic is walked, clockgating is disabled and the
2794 * suspend callbacks are run. suspend puts the hardware and software state
2795 * in each IP into a state suitable for suspend.
2796 * Returns 0 on success, negative error code on failure.
2798 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2802 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2803 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2805 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2806 if (!adev->ip_blocks[i].status.valid)
2809 /* displays are handled separately */
2810 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2814 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2815 /* XXX handle errors */
2817 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2818 adev->ip_blocks[i].version->funcs->name, r);
2822 adev->ip_blocks[i].status.hw = false;
2829 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2831 * @adev: amdgpu_device pointer
2833 * Main suspend function for hardware IPs. The list of all the hardware
2834 * IPs that make up the asic is walked, clockgating is disabled and the
2835 * suspend callbacks are run. suspend puts the hardware and software state
2836 * in each IP into a state suitable for suspend.
2837 * Returns 0 on success, negative error code on failure.
2839 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2844 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
2846 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2847 if (!adev->ip_blocks[i].status.valid)
2849 /* displays are handled in phase1 */
2850 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2852 /* PSP loses its connection when err_event_athub occurs */
2853 if (amdgpu_ras_intr_triggered() &&
2854 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2855 adev->ip_blocks[i].status.hw = false;
2859 /* skip unnecessary suspend if we have not initialized them yet */
2860 if (adev->gmc.xgmi.pending_reset &&
2861 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2862 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2863 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2864 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2865 adev->ip_blocks[i].status.hw = false;
2869 /* skip suspend of gfx and psp for S0ix
2870 * gfx is in the gfxoff state, so on resume it will exit gfxoff just
2871 * like at runtime. PSP is also part of the always-on hardware,
2872 * so there is no need to suspend it.
2874 if (adev->in_s0ix &&
2875 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2876 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2880 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2881 /* XXX handle errors */
2883 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2884 adev->ip_blocks[i].version->funcs->name, r);
2886 adev->ip_blocks[i].status.hw = false;
2887 /* handle putting the SMC in the appropriate state */
2888 if (!amdgpu_sriov_vf(adev)) {
2889 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2890 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2892 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2893 adev->mp1_state, r);
2904 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2906 * @adev: amdgpu_device pointer
2908 * Main suspend function for hardware IPs. The list of all the hardware
2909 * IPs that make up the asic is walked, clockgating is disabled and the
2910 * suspend callbacks are run. suspend puts the hardware and software state
2911 * in each IP into a state suitable for suspend.
2912 * Returns 0 on success, negative error code on failure.
2914 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2918 if (amdgpu_sriov_vf(adev)) {
2919 amdgpu_virt_fini_data_exchange(adev);
2920 amdgpu_virt_request_full_gpu(adev, false);
2923 r = amdgpu_device_ip_suspend_phase1(adev);
2926 r = amdgpu_device_ip_suspend_phase2(adev);
2928 if (amdgpu_sriov_vf(adev))
2929 amdgpu_virt_release_full_gpu(adev, false);
2934 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2938 static enum amd_ip_block_type ip_order[] = {
2939 AMD_IP_BLOCK_TYPE_GMC,
2940 AMD_IP_BLOCK_TYPE_COMMON,
2941 AMD_IP_BLOCK_TYPE_PSP,
2942 AMD_IP_BLOCK_TYPE_IH,
2945 for (i = 0; i < adev->num_ip_blocks; i++) {
2947 struct amdgpu_ip_block *block;
2949 block = &adev->ip_blocks[i];
2950 block->status.hw = false;
2952 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2954 if (block->version->type != ip_order[j] ||
2955 !block->status.valid)
2958 r = block->version->funcs->hw_init(adev);
2959 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2962 block->status.hw = true;
2969 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2973 static enum amd_ip_block_type ip_order[] = {
2974 AMD_IP_BLOCK_TYPE_SMC,
2975 AMD_IP_BLOCK_TYPE_DCE,
2976 AMD_IP_BLOCK_TYPE_GFX,
2977 AMD_IP_BLOCK_TYPE_SDMA,
2978 AMD_IP_BLOCK_TYPE_UVD,
2979 AMD_IP_BLOCK_TYPE_VCE,
2980 AMD_IP_BLOCK_TYPE_VCN
2983 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2985 struct amdgpu_ip_block *block;
2987 for (j = 0; j < adev->num_ip_blocks; j++) {
2988 block = &adev->ip_blocks[j];
2990 if (block->version->type != ip_order[i] ||
2991 !block->status.valid ||
2995 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
2996 r = block->version->funcs->resume(adev);
2998 r = block->version->funcs->hw_init(adev);
3000 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3003 block->status.hw = true;
3011 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3013 * @adev: amdgpu_device pointer
3015 * First resume function for hardware IPs. The list of all the hardware
3016 * IPs that make up the asic is walked and the resume callbacks are run for
3017 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3018 * after a suspend and updates the software state as necessary. This
3019 * function is also used for restoring the GPU after a GPU reset.
3020 * Returns 0 on success, negative error code on failure.
3022 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3026 for (i = 0; i < adev->num_ip_blocks; i++) {
3027 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3029 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3030 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3031 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3033 r = adev->ip_blocks[i].version->funcs->resume(adev);
3035 DRM_ERROR("resume of IP block <%s> failed %d\n",
3036 adev->ip_blocks[i].version->funcs->name, r);
3039 adev->ip_blocks[i].status.hw = true;
3047 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3049 * @adev: amdgpu_device pointer
3051 * Second resume function for hardware IPs. The list of all the hardware
3052 * IPs that make up the asic is walked and the resume callbacks are run for
3053 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
3054 * functional state after a suspend and updates the software state as
3055 * necessary. This function is also used for restoring the GPU after a GPU reset.
3057 * Returns 0 on success, negative error code on failure.
3059 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3063 for (i = 0; i < adev->num_ip_blocks; i++) {
3064 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3066 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3067 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3068 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3069 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3071 r = adev->ip_blocks[i].version->funcs->resume(adev);
3073 DRM_ERROR("resume of IP block <%s> failed %d\n",
3074 adev->ip_blocks[i].version->funcs->name, r);
3077 adev->ip_blocks[i].status.hw = true;
3084 * amdgpu_device_ip_resume - run resume for hardware IPs
3086 * @adev: amdgpu_device pointer
3088 * Main resume function for hardware IPs. The hardware IPs
3089 * are split into two resume functions because they are
3090 * also used in recovering from a GPU reset and some additional
3091 * steps need to be taken between them. In this case (S3/S4) they are run sequentially.
3093 * Returns 0 on success, negative error code on failure.
3095 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3099 r = amdgpu_device_ip_resume_phase1(adev);
3103 r = amdgpu_device_fw_loading(adev);
3107 r = amdgpu_device_ip_resume_phase2(adev);
3113 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3115 * @adev: amdgpu_device pointer
3117 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3119 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3121 if (amdgpu_sriov_vf(adev)) {
3122 if (adev->is_atom_fw) {
3123 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3124 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3126 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3127 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3130 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3131 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3136 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3138 * @asic_type: AMD asic type
3140 * Check if there is DC (new modesetting infrastructure) support for an asic.
3141 * returns true if DC has support, false if not.
3143 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3145 switch (asic_type) {
3146 #if defined(CONFIG_DRM_AMD_DC)
3147 #if defined(CONFIG_DRM_AMD_DC_SI)
3158 * We have systems in the wild with these ASICs that require
3159 * LVDS and VGA support which is not supported with DC.
3161 * Fall back to the non-DC driver here by default so as not to
3162 * cause regressions.
3164 return amdgpu_dc > 0;
3168 case CHIP_POLARIS10:
3169 case CHIP_POLARIS11:
3170 case CHIP_POLARIS12:
3177 #if defined(CONFIG_DRM_AMD_DC_DCN)
3183 case CHIP_SIENNA_CICHLID:
3184 case CHIP_NAVY_FLOUNDER:
3185 case CHIP_DIMGREY_CAVEFISH:
3186 case CHIP_BEIGE_GOBY:
3188 case CHIP_YELLOW_CARP:
3190 return amdgpu_dc != 0;
3194 DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3195 "but isn't supported by ASIC, ignoring\n");
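/*
 * Example usage (assumed from the checks above, not an authoritative
 * description of the module parameter): amdgpu.dc=1 forces DC on for the
 * legacy ASICs that default to the non-DC path, while amdgpu.dc=0
 * disables DC on ASICs where it defaults on, e.g.:
 *
 *	modprobe amdgpu dc=1
 */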
3201 * amdgpu_device_has_dc_support - check if dc is supported
3203 * @adev: amdgpu_device pointer
3205 * Returns true for supported, false for not supported
3207 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3209 if (amdgpu_sriov_vf(adev) ||
3210 adev->enable_virtual_display ||
3211 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3214 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3217 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3219 struct amdgpu_device *adev =
3220 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3221 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3223 /* It's a bug to not have a hive within this function */
3228 * Use task barrier to synchronize all xgmi reset works across the
3229 * hive. task_barrier_enter and task_barrier_exit will block
3230 * until all the threads running the xgmi reset works reach
3231 * those points. task_barrier_full will do both blocks.
3233 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3235 task_barrier_enter(&hive->tb);
3236 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3238 if (adev->asic_reset_res)
3241 task_barrier_exit(&hive->tb);
3242 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3244 if (adev->asic_reset_res)
3247 if (adev->mmhub.ras_funcs &&
3248 adev->mmhub.ras_funcs->reset_ras_error_count)
3249 adev->mmhub.ras_funcs->reset_ras_error_count(adev);
3252 task_barrier_full(&hive->tb);
3253 adev->asic_reset_res = amdgpu_asic_reset(adev);
3257 if (adev->asic_reset_res)
3258 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3259 adev->asic_reset_res, adev_to_drm(adev)->unique);
3260 amdgpu_put_xgmi_hive(hive);
3263 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3265 char *input = amdgpu_lockup_timeout;
3266 char *timeout_setting = NULL;
3272 * By default the timeout for non-compute jobs is 10000 ms
3273 * and 60000 ms for compute jobs.
3274 * In SR-IOV or passthrough mode, the timeout for compute
3275 * jobs is 60000 ms by default.
3277 adev->gfx_timeout = msecs_to_jiffies(10000);
3278 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3279 if (amdgpu_sriov_vf(adev))
3280 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3281 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3283 adev->compute_timeout = msecs_to_jiffies(60000);
3285 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3286 while ((timeout_setting = strsep(&input, ",")) &&
3287 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3288 ret = kstrtol(timeout_setting, 0, &timeout);
3295 } else if (timeout < 0) {
3296 timeout = MAX_SCHEDULE_TIMEOUT;
3298 timeout = msecs_to_jiffies(timeout);
3303 adev->gfx_timeout = timeout;
3306 adev->compute_timeout = timeout;
3309 adev->sdma_timeout = timeout;
3312 adev->video_timeout = timeout;
3319 * There is only one value specified and
3320 * it should apply to all non-compute jobs.
3323 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3324 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3325 adev->compute_timeout = adev->gfx_timeout;
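/*
 * Example (derived from the parsing above; hedged, see the lockup_timeout
 * module parameter documentation for the authoritative format): a single
 * value applies to all non-compute jobs,
 *
 *	modprobe amdgpu lockup_timeout=10000
 *
 * while up to four comma-separated values set the gfx, compute, sdma and
 * video timeouts in that order, with 0 keeping the default and a negative
 * value meaning an infinite timeout:
 *
 *	modprobe amdgpu lockup_timeout=10000,60000,10000,10000
 */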
3332 static const struct attribute *amdgpu_dev_attributes[] = {
3333 &dev_attr_product_name.attr,
3334 &dev_attr_product_number.attr,
3335 &dev_attr_serial_number.attr,
3336 &dev_attr_pcie_replay_count.attr,
3341 * amdgpu_device_init - initialize the driver
3343 * @adev: amdgpu_device pointer
3344 * @flags: driver flags
3346 * Initializes the driver info and hw (all asics).
3347 * Returns 0 for success or an error on failure.
3348 * Called at driver startup.
3350 int amdgpu_device_init(struct amdgpu_device *adev,
3353 struct drm_device *ddev = adev_to_drm(adev);
3354 struct pci_dev *pdev = adev->pdev;
3359 adev->shutdown = false;
3360 adev->flags = flags;
3362 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3363 adev->asic_type = amdgpu_force_asic_type;
3365 adev->asic_type = flags & AMD_ASIC_MASK;
3367 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3368 if (amdgpu_emu_mode == 1)
3369 adev->usec_timeout *= 10;
3370 adev->gmc.gart_size = 512 * 1024 * 1024;
3371 adev->accel_working = false;
3372 adev->num_rings = 0;
3373 adev->mman.buffer_funcs = NULL;
3374 adev->mman.buffer_funcs_ring = NULL;
3375 adev->vm_manager.vm_pte_funcs = NULL;
3376 adev->vm_manager.vm_pte_num_scheds = 0;
3377 adev->gmc.gmc_funcs = NULL;
3378 adev->harvest_ip_mask = 0x0;
3379 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3380 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3382 adev->smc_rreg = &amdgpu_invalid_rreg;
3383 adev->smc_wreg = &amdgpu_invalid_wreg;
3384 adev->pcie_rreg = &amdgpu_invalid_rreg;
3385 adev->pcie_wreg = &amdgpu_invalid_wreg;
3386 adev->pciep_rreg = &amdgpu_invalid_rreg;
3387 adev->pciep_wreg = &amdgpu_invalid_wreg;
3388 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3389 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3390 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3391 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3392 adev->didt_rreg = &amdgpu_invalid_rreg;
3393 adev->didt_wreg = &amdgpu_invalid_wreg;
3394 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3395 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3396 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3397 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3399 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3400 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3401 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3403 /* mutex initialization is all done here so we
3404 * can recall functions without having locking issues */
3405 mutex_init(&adev->firmware.mutex);
3406 mutex_init(&adev->pm.mutex);
3407 mutex_init(&adev->gfx.gpu_clock_mutex);
3408 mutex_init(&adev->srbm_mutex);
3409 mutex_init(&adev->gfx.pipe_reserve_mutex);
3410 mutex_init(&adev->gfx.gfx_off_mutex);
3411 mutex_init(&adev->grbm_idx_mutex);
3412 mutex_init(&adev->mn_lock);
3413 mutex_init(&adev->virt.vf_errors.lock);
3414 hash_init(adev->mn_hash);
3415 atomic_set(&adev->in_gpu_reset, 0);
3416 init_rwsem(&adev->reset_sem);
3417 mutex_init(&adev->psp.mutex);
3418 mutex_init(&adev->notifier_lock);
3420 r = amdgpu_device_init_apu_flags(adev);
3424 r = amdgpu_device_check_arguments(adev);
3428 spin_lock_init(&adev->mmio_idx_lock);
3429 spin_lock_init(&adev->smc_idx_lock);
3430 spin_lock_init(&adev->pcie_idx_lock);
3431 spin_lock_init(&adev->uvd_ctx_idx_lock);
3432 spin_lock_init(&adev->didt_idx_lock);
3433 spin_lock_init(&adev->gc_cac_idx_lock);
3434 spin_lock_init(&adev->se_cac_idx_lock);
3435 spin_lock_init(&adev->audio_endpt_idx_lock);
3436 spin_lock_init(&adev->mm_stats.lock);
3438 INIT_LIST_HEAD(&adev->shadow_list);
3439 mutex_init(&adev->shadow_list_lock);
3441 INIT_LIST_HEAD(&adev->reset_list);
3443 INIT_DELAYED_WORK(&adev->delayed_init_work,
3444 amdgpu_device_delayed_init_work_handler);
3445 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3446 amdgpu_device_delay_enable_gfx_off);
3448 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3450 adev->gfx.gfx_off_req_count = 1;
3451 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3453 atomic_set(&adev->throttling_logging_enabled, 1);
3455 * If throttling continues, logging will be performed every minute
3456 * to avoid log flooding. "-1" is subtracted since the thermal
3457 * throttling interrupt comes every second. Thus, the total logging
3458 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3459 * for throttling interrupt) = 60 seconds.
3461 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3462 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
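	/*
	 * Hedged example (assumed; the actual consumer lives in the SMU/
	 * powerplay code, not here): the thermal throttling interrupt
	 * handler is expected to gate its warning on this ratelimit
	 * state, roughly:
	 *
	 *	if (__ratelimit(&adev->throttling_logging_rs))
	 *		dev_warn(adev->dev, "GPU throttled\n");
	 */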
3464 /* Registers mapping */
3465 /* TODO: block userspace mapping of io register */
3466 if (adev->asic_type >= CHIP_BONAIRE) {
3467 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3468 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3470 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3471 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3474 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3475 if (adev->rmmio == NULL) {
3478 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3479 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3481 /* enable PCIE atomic ops */
3482 r = pci_enable_atomic_ops_to_root(adev->pdev,
3483 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3484 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3486 adev->have_atomics_support = false;
3487 DRM_INFO("PCIE atomic ops are not supported\n");
3489 adev->have_atomics_support = true;
3492 amdgpu_device_get_pcie_info(adev);
3495 DRM_INFO("MCBP is enabled\n");
3497 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3498 adev->enable_mes = true;
3500 /* detect hw virtualization here */
3501 amdgpu_detect_virtualization(adev);
3503 r = amdgpu_device_get_job_timeout_settings(adev);
3505 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3509 /* early init functions */
3510 r = amdgpu_device_ip_early_init(adev);
3514 /* doorbell bar mapping and doorbell index init */
3515 amdgpu_device_doorbell_init(adev);
3517 if (amdgpu_emu_mode == 1) {
3518 /* post the asic in emulation mode */
3519 emu_soc_asic_init(adev);
3520 goto fence_driver_init;
3523 amdgpu_reset_init(adev);
3525 /* detect if we have an SRIOV vbios */
3526 amdgpu_device_detect_sriov_bios(adev);
3528 /* check if we need to reset the asic
3529 * E.g., driver was not cleanly unloaded previously, etc.
3531 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3532 if (adev->gmc.xgmi.num_physical_nodes) {
3533 dev_info(adev->dev, "Pending hive reset.\n");
3534 adev->gmc.xgmi.pending_reset = true;
3535 /* Only need to init the necessary blocks for SMU to handle the reset */
3536 for (i = 0; i < adev->num_ip_blocks; i++) {
3537 if (!adev->ip_blocks[i].status.valid)
3539 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3540 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3541 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3542 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3543 DRM_DEBUG("IP %s disabled for hw_init.\n",
3544 adev->ip_blocks[i].version->funcs->name);
3545 adev->ip_blocks[i].status.hw = true;
3549 r = amdgpu_asic_reset(adev);
3551 dev_err(adev->dev, "asic reset on init failed\n");
3557 pci_enable_pcie_error_reporting(adev->pdev);
3559 /* Post card if necessary */
3560 if (amdgpu_device_need_post(adev)) {
3562 dev_err(adev->dev, "no vBIOS found\n");
3566 DRM_INFO("GPU posting now...\n");
3567 r = amdgpu_device_asic_init(adev);
3569 dev_err(adev->dev, "gpu post error!\n");
3574 if (adev->is_atom_fw) {
3575 /* Initialize clocks */
3576 r = amdgpu_atomfirmware_get_clock_info(adev);
3578 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3579 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3583 /* Initialize clocks */
3584 r = amdgpu_atombios_get_clock_info(adev);
3586 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3587 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3590 /* init i2c buses */
3591 if (!amdgpu_device_has_dc_support(adev))
3592 amdgpu_atombios_i2c_init(adev);
3597 r = amdgpu_fence_driver_init(adev);
3599 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
3600 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3604 /* init the mode config */
3605 drm_mode_config_init(adev_to_drm(adev));
3607 r = amdgpu_device_ip_init(adev);
3609 /* failed in exclusive mode due to timeout */
3610 if (amdgpu_sriov_vf(adev) &&
3611 !amdgpu_sriov_runtime(adev) &&
3612 amdgpu_virt_mmio_blocked(adev) &&
3613 !amdgpu_virt_wait_reset(adev)) {
3614 dev_err(adev->dev, "VF exclusive mode timeout\n");
3615 /* Don't send request since VF is inactive. */
3616 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3617 adev->virt.ops = NULL;
3619 goto release_ras_con;
3621 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3622 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3623 goto release_ras_con;
3627 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3628 adev->gfx.config.max_shader_engines,
3629 adev->gfx.config.max_sh_per_se,
3630 adev->gfx.config.max_cu_per_sh,
3631 adev->gfx.cu_info.number);
3633 adev->accel_working = true;
3635 amdgpu_vm_check_compute_bug(adev);
3637 /* Initialize the buffer migration limit. */
3638 if (amdgpu_moverate >= 0)
3639 max_MBps = amdgpu_moverate;
3641 max_MBps = 8; /* Allow 8 MB/s. */
3642 /* Get a log2 for easy divisions. */
3643 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3645 amdgpu_fbdev_init(adev);
3647 r = amdgpu_pm_sysfs_init(adev);
3649 adev->pm_sysfs_en = false;
3650 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3652 adev->pm_sysfs_en = true;
3654 r = amdgpu_ucode_sysfs_init(adev);
3656 adev->ucode_sysfs_en = false;
3657 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3659 adev->ucode_sysfs_en = true;
3661 if ((amdgpu_testing & 1)) {
3662 if (adev->accel_working)
3663 amdgpu_test_moves(adev);
3665 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3667 if (amdgpu_benchmarking) {
3668 if (adev->accel_working)
3669 amdgpu_benchmark(adev, amdgpu_benchmarking);
3671 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3675 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3676 * Otherwise the mgpu fan boost feature will be skipped because the
3677 * gpu instance count would be too low.
3679 amdgpu_register_gpu_instance(adev);
3681 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3682 * explicit gating rather than handling it automatically.
3684 if (!adev->gmc.xgmi.pending_reset) {
3685 r = amdgpu_device_ip_late_init(adev);
3687 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3688 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3689 goto release_ras_con;
3692 amdgpu_ras_resume(adev);
3693 queue_delayed_work(system_wq, &adev->delayed_init_work,
3694 msecs_to_jiffies(AMDGPU_RESUME_MS));
3697 if (amdgpu_sriov_vf(adev))
3698 flush_delayed_work(&adev->delayed_init_work);
3700 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3702 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3704 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3705 r = amdgpu_pmu_init(adev);
3707 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3709 /* have the stored pci confspace at hand for restore on a sudden PCI error */
3710 if (amdgpu_device_cache_pci_state(adev->pdev))
3711 pci_restore_state(pdev);
3713 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3714 /* this will fail for cards that aren't VGA class devices; just ignore it */
3716 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3717 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
3719 if (amdgpu_device_supports_px(ddev)) {
3721 vga_switcheroo_register_client(adev->pdev,
3722 &amdgpu_switcheroo_ops, px);
3723 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3726 if (adev->gmc.xgmi.pending_reset)
3727 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3728 msecs_to_jiffies(AMDGPU_RESUME_MS));
3733 amdgpu_release_ras_context(adev);
3736 amdgpu_vf_error_trans_all(adev);
3741 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3743 /* Clear all CPU mappings pointing to this device */
3744 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3746 /* Unmap all mapped bars - Doorbell, registers and VRAM */
3747 amdgpu_device_doorbell_fini(adev);
3749 iounmap(adev->rmmio);
3751 if (adev->mman.aper_base_kaddr)
3752 iounmap(adev->mman.aper_base_kaddr);
3753 adev->mman.aper_base_kaddr = NULL;
3755 /* Memory manager related */
3756 if (!adev->gmc.xgmi.connected_to_cpu) {
3757 arch_phys_wc_del(adev->gmc.vram_mtrr);
3758 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3763 * amdgpu_device_fini_hw - tear down the driver
3765 * @adev: amdgpu_device pointer
3767 * Tear down the driver info (all asics).
3768 * Called at driver shutdown.
3770 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3772 dev_info(adev->dev, "amdgpu: finishing device.\n");
3773 flush_delayed_work(&adev->delayed_init_work);
3774 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3775 adev->shutdown = true;
3777 /* make sure IB tests have finished before entering exclusive mode
3778 * to avoid preemption on the IB tests
3780 if (amdgpu_sriov_vf(adev)) {
3781 amdgpu_virt_request_full_gpu(adev, false);
3782 amdgpu_virt_fini_data_exchange(adev);
3785 /* disable all interrupts */
3786 amdgpu_irq_disable_all(adev);
3787 if (adev->mode_info.mode_config_initialized) {
3788 if (!amdgpu_device_has_dc_support(adev))
3789 drm_helper_force_disable_all(adev_to_drm(adev));
3791 drm_atomic_helper_shutdown(adev_to_drm(adev));
3793 amdgpu_fence_driver_fini_hw(adev);
3795 if (adev->pm_sysfs_en)
3796 amdgpu_pm_sysfs_fini(adev);
3797 if (adev->ucode_sysfs_en)
3798 amdgpu_ucode_sysfs_fini(adev);
3799 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3801 amdgpu_fbdev_fini(adev);
3803 amdgpu_irq_fini_hw(adev);
3805 amdgpu_device_ip_fini_early(adev);
3807 amdgpu_gart_dummy_page_fini(adev);
3809 amdgpu_device_unmap_mmio(adev);
3812 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3814 amdgpu_device_ip_fini(adev);
3815 amdgpu_fence_driver_fini_sw(adev);
3816 release_firmware(adev->firmware.gpu_info_fw);
3817 adev->firmware.gpu_info_fw = NULL;
3818 adev->accel_working = false;
3820 amdgpu_reset_fini(adev);
3822 /* free i2c buses */
3823 if (!amdgpu_device_has_dc_support(adev))
3824 amdgpu_i2c_fini(adev);
3826 if (amdgpu_emu_mode != 1)
3827 amdgpu_atombios_fini(adev);
3831 if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3832 vga_switcheroo_unregister_client(adev->pdev);
3833 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3835 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3836 vga_client_register(adev->pdev, NULL, NULL, NULL);
3838 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3839 amdgpu_pmu_fini(adev);
3840 if (adev->mman.discovery_bin)
3841 amdgpu_discovery_fini(adev);
3843 kfree(adev->pci_state);
3852 * amdgpu_device_suspend - initiate device suspend
3854 * @dev: drm dev pointer
3855 * @fbcon: notify the fbdev of suspend
3857 * Puts the hw in the suspend state (all asics).
3858 * Returns 0 for success or an error on failure.
3859 * Called at driver suspend.
3861 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3863 struct amdgpu_device *adev = drm_to_adev(dev);
3865 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3868 adev->in_suspend = true;
3870 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
3871 DRM_WARN("smart shift update failed\n");
3873 drm_kms_helper_poll_disable(dev);
3876 amdgpu_fbdev_set_suspend(adev, 1);
3878 cancel_delayed_work_sync(&adev->delayed_init_work);
3880 amdgpu_ras_suspend(adev);
3882 amdgpu_device_ip_suspend_phase1(adev);
3885 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
3887 /* evict vram memory */
3888 amdgpu_bo_evict_vram(adev);
3890 amdgpu_fence_driver_suspend(adev);
3892 amdgpu_device_ip_suspend_phase2(adev);
3893 /* evict remaining vram memory
3894 * This second call to evict vram is to evict the gart page table
3897 amdgpu_bo_evict_vram(adev);
3903 * amdgpu_device_resume - initiate device resume
3905 * @dev: drm dev pointer
3906 * @fbcon: notify the fbdev of resume
3908 * Bring the hw back to operating state (all asics).
3909 * Returns 0 for success or an error on failure.
3910 * Called at driver resume.
3912 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3914 struct amdgpu_device *adev = drm_to_adev(dev);
3917 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3921 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3924 if (amdgpu_device_need_post(adev)) {
3925 r = amdgpu_device_asic_init(adev);
3927 dev_err(adev->dev, "amdgpu asic init failed\n");
3930 r = amdgpu_device_ip_resume(adev);
3932 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3935 amdgpu_fence_driver_resume(adev);
3938 r = amdgpu_device_ip_late_init(adev);
3942 queue_delayed_work(system_wq, &adev->delayed_init_work,
3943 msecs_to_jiffies(AMDGPU_RESUME_MS));
3945 if (!adev->in_s0ix) {
3946 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
3951 /* Make sure IB tests flushed */
3952 flush_delayed_work(&adev->delayed_init_work);
3955 amdgpu_fbdev_set_suspend(adev, 0);
3957 drm_kms_helper_poll_enable(dev);
3959 amdgpu_ras_resume(adev);
3962 * Most of the connector probing functions try to acquire runtime pm
3963 * refs to ensure that the GPU is powered on when connector polling is
3964 * performed. Since we're calling this from a runtime PM callback,
3965 * trying to acquire rpm refs will cause us to deadlock.
3967 * Since we're guaranteed to be holding the rpm lock, it's safe to
3968 * temporarily disable the rpm helpers so this doesn't deadlock us.
3971 dev->dev->power.disable_depth++;
3973 if (!amdgpu_device_has_dc_support(adev))
3974 drm_helper_hpd_irq_event(dev);
3976 drm_kms_helper_hotplug_event(dev);
3978 dev->dev->power.disable_depth--;
3980 adev->in_suspend = false;
3982 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
3983 DRM_WARN("smart shift update failed\n");
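/*
 * Minimal sketch (simplified; the real wiring lives in amdgpu_drv.c):
 * the suspend/resume entry points above are hooked into dev_pm_ops
 * roughly like this.
 */
static int __maybe_unused example_pmops_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_suspend(drm_dev, true);
}

static int __maybe_unused example_pmops_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_resume(drm_dev, true);
}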
3989 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3991 * @adev: amdgpu_device pointer
3993 * The list of all the hardware IPs that make up the asic is walked and
3994 * the check_soft_reset callbacks are run. check_soft_reset determines
3995 * if the asic is still hung or not.
3996 * Returns true if any of the IPs are still in a hung state, false if not.
3998 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4001 bool asic_hang = false;
4003 if (amdgpu_sriov_vf(adev))
4006 if (amdgpu_asic_need_full_reset(adev))
4009 for (i = 0; i < adev->num_ip_blocks; i++) {
4010 if (!adev->ip_blocks[i].status.valid)
4012 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4013 adev->ip_blocks[i].status.hang =
4014 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4015 if (adev->ip_blocks[i].status.hang) {
4016 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4024 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4026 * @adev: amdgpu_device pointer
4028 * The list of all the hardware IPs that make up the asic is walked and the
4029 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4030 * handles any IP specific hardware or software state changes that are
4031 * necessary for a soft reset to succeed.
4032 * Returns 0 on success, negative error code on failure.
4034 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4038 for (i = 0; i < adev->num_ip_blocks; i++) {
4039 if (!adev->ip_blocks[i].status.valid)
4041 if (adev->ip_blocks[i].status.hang &&
4042 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4043 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4053 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4055 * @adev: amdgpu_device pointer
4057 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4058 * reset is necessary to recover.
4059 * Returns true if a full asic reset is required, false if not.
4061 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4065 if (amdgpu_asic_need_full_reset(adev))
4068 for (i = 0; i < adev->num_ip_blocks; i++) {
4069 if (!adev->ip_blocks[i].status.valid)
4071 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4072 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4073 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4074 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4075 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4076 if (adev->ip_blocks[i].status.hang) {
4077 dev_info(adev->dev, "Some blocks need a full reset!\n");
4086 * amdgpu_device_ip_soft_reset - do a soft reset
4088 * @adev: amdgpu_device pointer
4090 * The list of all the hardware IPs that make up the asic is walked and the
4091 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4092 * IP specific hardware or software state changes that are necessary to soft reset the IP.
4094 * Returns 0 on success, negative error code on failure.
4096 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4100 for (i = 0; i < adev->num_ip_blocks; i++) {
4101 if (!adev->ip_blocks[i].status.valid)
4103 if (adev->ip_blocks[i].status.hang &&
4104 adev->ip_blocks[i].version->funcs->soft_reset) {
4105 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4115 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4117 * @adev: amdgpu_device pointer
4119 * The list of all the hardware IPs that make up the asic is walked and the
4120 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4121 * handles any IP specific hardware or software state changes that are
4122 * necessary after the IP has been soft reset.
4123 * Returns 0 on success, negative error code on failure.
4125 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4129 for (i = 0; i < adev->num_ip_blocks; i++) {
4130 if (!adev->ip_blocks[i].status.valid)
4132 if (adev->ip_blocks[i].status.hang &&
4133 adev->ip_blocks[i].version->funcs->post_soft_reset)
4134 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4143 * amdgpu_device_recover_vram - Recover some VRAM contents
4145 * @adev: amdgpu_device pointer
4147 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4148 * restore things like GPUVM page tables after a GPU reset where
4149 * the contents of VRAM might be lost.
4152 * 0 on success, negative error code on failure.
4154 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4156 struct dma_fence *fence = NULL, *next = NULL;
4157 struct amdgpu_bo *shadow;
4158 struct amdgpu_bo_vm *vmbo;
4161 if (amdgpu_sriov_runtime(adev))
4162 tmo = msecs_to_jiffies(8000);
4164 tmo = msecs_to_jiffies(100);
4166 dev_info(adev->dev, "recover vram bo from shadow start\n");
4167 mutex_lock(&adev->shadow_list_lock);
4168 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4170 /* No need to recover an evicted BO */
4171 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4172 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4173 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4176 r = amdgpu_bo_restore_shadow(shadow, &next);
4181 tmo = dma_fence_wait_timeout(fence, false, tmo);
4182 dma_fence_put(fence);
4187 } else if (tmo < 0) {
4195 mutex_unlock(&adev->shadow_list_lock);
4198 tmo = dma_fence_wait_timeout(fence, false, tmo);
4199 dma_fence_put(fence);
4201 if (r < 0 || tmo <= 0) {
4202 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4206 dev_info(adev->dev, "recover vram bo from shadow done\n");
4212 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4214 * @adev: amdgpu_device pointer
4215 * @from_hypervisor: request from hypervisor
4217 * Do a VF FLR and reinitialize the ASIC.
4218 * Returns 0 on success, otherwise failure.
4220 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4221 bool from_hypervisor)
4225 if (from_hypervisor)
4226 r = amdgpu_virt_request_full_gpu(adev, true);
4228 r = amdgpu_virt_reset_gpu(adev);
4232 amdgpu_amdkfd_pre_reset(adev);
4234 /* Resume IP prior to SMC */
4235 r = amdgpu_device_ip_reinit_early_sriov(adev);
4239 amdgpu_virt_init_data_exchange(adev);
4240 /* we need to recover the gart prior to running SMC/CP/SDMA resume */
4241 amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4243 r = amdgpu_device_fw_loading(adev);
4247 /* now we are okay to resume SMC/CP/SDMA */
4248 r = amdgpu_device_ip_reinit_late_sriov(adev);
4252 amdgpu_irq_gpu_reset_resume_helper(adev);
4253 r = amdgpu_ib_ring_tests(adev);
4254 amdgpu_amdkfd_post_reset(adev);
4257 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4258 amdgpu_inc_vram_lost(adev);
4259 r = amdgpu_device_recover_vram(adev);
4261 amdgpu_virt_release_full_gpu(adev, true);
4267 * amdgpu_device_has_job_running - check if there is any job in mirror list
4269 * @adev: amdgpu_device pointer
4271 * check if there is any job in mirror list
4273 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4276 struct drm_sched_job *job;
4278 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4279 struct amdgpu_ring *ring = adev->rings[i];
4281 if (!ring || !ring->sched.thread)
4284 spin_lock(&ring->sched.job_list_lock);
4285 job = list_first_entry_or_null(&ring->sched.pending_list,
4286 struct drm_sched_job, list);
4287 spin_unlock(&ring->sched.job_list_lock);
4295 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4297 * @adev: amdgpu_device pointer
4299 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4302 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4304 if (!amdgpu_device_ip_check_soft_reset(adev)) {
4305 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4309 if (amdgpu_gpu_recovery == 0)
4312 if (amdgpu_sriov_vf(adev))
4315 if (amdgpu_gpu_recovery == -1) {
4316 switch (adev->asic_type) {
4322 case CHIP_POLARIS10:
4323 case CHIP_POLARIS11:
4324 case CHIP_POLARIS12:
4335 case CHIP_SIENNA_CICHLID:
4336 case CHIP_NAVY_FLOUNDER:
4337 case CHIP_DIMGREY_CAVEFISH:
4338 case CHIP_BEIGE_GOBY:
4340 case CHIP_ALDEBARAN:
4350 dev_info(adev->dev, "GPU recovery disabled.\n");
4354 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4359 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4361 dev_info(adev->dev, "GPU mode1 reset\n");
4364 pci_clear_master(adev->pdev);
4366 amdgpu_device_cache_pci_state(adev->pdev);
4368 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4369 dev_info(adev->dev, "GPU smu mode1 reset\n");
4370 ret = amdgpu_dpm_mode1_reset(adev);
4372 dev_info(adev->dev, "GPU psp mode1 reset\n");
4373 ret = psp_gpu_reset(adev);
4377 dev_err(adev->dev, "GPU mode1 reset failed\n");
4379 amdgpu_device_load_pci_state(adev->pdev);
4381 /* wait for asic to come out of reset */
4382 for (i = 0; i < adev->usec_timeout; i++) {
4383 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4385 if (memsize != 0xffffffff)
4390 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4394 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4395 struct amdgpu_reset_context *reset_context)
4398 struct amdgpu_job *job = NULL;
4399 bool need_full_reset =
4400 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4402 if (reset_context->reset_req_dev == adev)
4403 job = reset_context->job;
4405 /* no need to dump if device is not in good state during probe period */
4406 if (!adev->gmc.xgmi.pending_reset)
4407 amdgpu_debugfs_wait_dump(adev);
4409 if (amdgpu_sriov_vf(adev)) {
4410 /* stop the data exchange thread */
4411 amdgpu_virt_fini_data_exchange(adev);
4414 /* block all schedulers and reset given job's ring */
4415 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4416 struct amdgpu_ring *ring = adev->rings[i];
4418 if (!ring || !ring->sched.thread)
4421 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4422 amdgpu_fence_driver_force_completion(ring);
4426 drm_sched_increase_karma(&job->base);
4428 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4429 /* If reset handler not implemented, continue; otherwise return */
4435 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4436 if (!amdgpu_sriov_vf(adev)) {
4438 if (!need_full_reset)
4439 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4441 if (!need_full_reset) {
4442 amdgpu_device_ip_pre_soft_reset(adev);
4443 r = amdgpu_device_ip_soft_reset(adev);
4444 amdgpu_device_ip_post_soft_reset(adev);
4445 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4446 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4447 need_full_reset = true;
4451 if (need_full_reset)
4452 r = amdgpu_device_ip_suspend(adev);
4453 if (need_full_reset)
4454 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4456 clear_bit(AMDGPU_NEED_FULL_RESET,
4457 &reset_context->flags);
4463 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4464 struct amdgpu_reset_context *reset_context)
4466 struct amdgpu_device *tmp_adev = NULL;
4467 bool need_full_reset, skip_hw_reset, vram_lost = false;
4470 /* Try reset handler method first */
4471 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4473 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4474 /* If reset handler not implemented, continue; otherwise return */
4480 /* Reset handler not implemented, use the default method */
4482 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4483 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4486 * ASIC reset has to be done on all XGMI hive nodes ASAP
4487 * to allow proper link negotiation in the FW (within 1 sec)
4489 if (!skip_hw_reset && need_full_reset) {
4490 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4491 /* For XGMI run all resets in parallel to speed up the process */
4492 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4493 tmp_adev->gmc.xgmi.pending_reset = false;
4494 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4497 r = amdgpu_asic_reset(tmp_adev);
4500 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4501 r, adev_to_drm(tmp_adev)->unique);
4506 /* For XGMI wait for all resets to complete before proceed */
4508 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4509 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4510 flush_work(&tmp_adev->xgmi_reset_work);
4511 r = tmp_adev->asic_reset_res;
4519 if (!r && amdgpu_ras_intr_triggered()) {
4520 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4521 if (tmp_adev->mmhub.ras_funcs &&
4522 tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
4523 tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
4526 amdgpu_ras_intr_cleared();
4529 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4530 if (need_full_reset) {
4532 r = amdgpu_device_asic_init(tmp_adev);
4534 dev_warn(tmp_adev->dev, "asic atom init failed!");
4536 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4537 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4541 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4543 DRM_INFO("VRAM is lost due to GPU reset!\n");
4544 amdgpu_inc_vram_lost(tmp_adev);
4547 r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4551 r = amdgpu_device_fw_loading(tmp_adev);
4555 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4560 amdgpu_device_fill_reset_magic(tmp_adev);
4563 * Add this ASIC as tracked as reset was already
4564 * complete successfully.
4566 amdgpu_register_gpu_instance(tmp_adev);
4568 if (!reset_context->hive &&
4569 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4570 amdgpu_xgmi_add_device(tmp_adev);
4572 r = amdgpu_device_ip_late_init(tmp_adev);
4576 amdgpu_fbdev_set_suspend(tmp_adev, 0);
4579 * The GPU enters bad state once faulty pages
4580 * by ECC has reached the threshold, and ras
4581 * recovery is scheduled next. So add one check
4582 * here to break recovery if it indeed exceeds
4583 * bad page threshold, and remind user to
4584 * retire this GPU or setting one bigger
4585 * bad_page_threshold value to fix this once
4586 * probing driver again.
4588 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4590 amdgpu_ras_resume(tmp_adev);
4596 /* Update PSP FW topology after reset */
4597 if (reset_context->hive &&
4598 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4599 r = amdgpu_xgmi_update_topology(
4600 reset_context->hive, tmp_adev);
4606 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4607 r = amdgpu_ib_ring_tests(tmp_adev);
4609 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4610 need_full_reset = true;
4617 r = amdgpu_device_recover_vram(tmp_adev);
4619 tmp_adev->asic_reset_res = r;
4623 if (need_full_reset)
4624 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4626 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
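
/*
 * Illustrative sketch, not part of the driver: the minimal calling pattern
 * for amdgpu_do_asic_reset() on a single non-XGMI device, mirroring what the
 * PCI slot-reset path below does. The helper name is hypothetical.
 */
static inline int amdgpu_example_single_dev_asic_reset(struct amdgpu_device *adev)
{
	struct amdgpu_reset_context reset_context;
	struct list_head device_list;

	memset(&reset_context, 0, sizeof(reset_context));
	reset_context.method = AMD_RESET_METHOD_NONE;	/* let the ASIC pick */
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	/* amdgpu_do_asic_reset() walks devices through their reset_list node */
	INIT_LIST_HEAD(&device_list);
	list_add_tail(&adev->reset_list, &device_list);

	return amdgpu_do_asic_reset(&device_list, &reset_context);
}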

static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
				struct amdgpu_hive_info *hive)
{
	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
		return false;

	if (hive)
		down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
	else
		down_write(&adev->reset_sem);

	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_MODE1:
		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
		break;
	case AMD_RESET_METHOD_MODE2:
		adev->mp1_state = PP_MP1_STATE_RESET;
		break;
	default:
		adev->mp1_state = PP_MP1_STATE_NONE;
		break;
	}

	return true;
}

static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
	amdgpu_vf_error_trans_all(adev);
	adev->mp1_state = PP_MP1_STATE_NONE;
	atomic_set(&adev->in_gpu_reset, 0);
	up_write(&adev->reset_sem);
}
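
/*
 * Illustrative sketch, not part of the driver: amdgpu_device_lock_adev() and
 * amdgpu_device_unlock_adev() are meant to bracket one whole recovery
 * attempt; the trylock-style return value lets a concurrent handler back off
 * instead of queueing behind the reset. The helper name is hypothetical.
 */
static inline bool amdgpu_example_try_locked_recovery(struct amdgpu_device *adev)
{
	if (!amdgpu_device_lock_adev(adev, NULL))
		return false;	/* another reset owns the device, bail out */

	/* ... reset work goes here; mp1_state is set for the chosen method ... */

	amdgpu_device_unlock_adev(adev);	/* also clears mp1_state */
	return true;
}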

/*
 * Lock a list of amdgpu devices in a hive safely. If this is not a hive
 * with multiple nodes, it behaves like amdgpu_device_lock_adev.
 *
 * unlock won't require roll back.
 */
static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
{
	struct amdgpu_device *tmp_adev = NULL;

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		if (!hive) {
			dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
			return -ENODEV;
		}
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			if (!amdgpu_device_lock_adev(tmp_adev, hive))
				goto roll_back;
		}
	} else if (!amdgpu_device_lock_adev(adev, hive))
		return -EAGAIN;

	return 0;
roll_back:
	if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
		/*
		 * If the locking iteration broke in the middle of a hive,
		 * it may mean there is a race, or that a hive device locked
		 * up independently. We may or may not be in trouble, so
		 * roll back the locks taken so far and warn.
		 */
		dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
		list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			amdgpu_device_unlock_adev(tmp_adev);
		}
	}
	return -EAGAIN;
}

static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
			adev->pdev->bus->number, 1);
	if (p) {
		pm_runtime_enable(&(p->dev));
		pm_runtime_resume(&(p->dev));
	}
}

static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
{
	enum amd_reset_method reset_method;
	struct pci_dev *p = NULL;
	u64 expires;

	/*
	 * For now, only BACO and mode1 reset are confirmed
	 * to suffer from the audio issue when the audio device
	 * is not properly suspended first.
	 */
	reset_method = amdgpu_asic_reset_method(adev);
	if ((reset_method != AMD_RESET_METHOD_BACO) &&
	    (reset_method != AMD_RESET_METHOD_MODE1))
		return -EINVAL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
			adev->pdev->bus->number, 1);
	if (!p)
		return -ENODEV;

	expires = pm_runtime_autosuspend_expiration(&(p->dev));
	if (!expires)
		/*
		 * If we cannot get the audio device autosuspend delay,
		 * a fixed 4S interval will be used. Considering 3S is
		 * the audio controller's default autosuspend delay,
		 * 4S is guaranteed to cover it.
		 */
		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;

	while (!pm_runtime_status_suspended(&(p->dev))) {
		if (!pm_runtime_suspend(&(p->dev)))
			break;

		if (expires < ktime_get_mono_fast_ns()) {
			dev_warn(adev->dev, "failed to suspend display audio\n");
			/* TODO: abort the succeeding gpu reset? */
			return -ETIMEDOUT;
		}
	}

	pm_runtime_disable(&(p->dev));

	return 0;
}

static void amdgpu_device_recheck_guilty_jobs(
	struct amdgpu_device *adev, struct list_head *device_list_handle,
	struct amdgpu_reset_context *reset_context)
{
	int i, r = 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		int ret = 0;
		struct drm_sched_job *s_job;

		if (!ring || !ring->sched.thread)
			continue;

		s_job = list_first_entry_or_null(&ring->sched.pending_list,
				struct drm_sched_job, list);
		if (s_job == NULL)
			continue;

		/* clear the job's guilty flag and rely on the following step to decide the real one */
		drm_sched_reset_karma(s_job);
		drm_sched_resubmit_jobs_ext(&ring->sched, 1);

		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
		if (ret == 0) { /* timeout */
			DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
				  ring->sched.name, s_job->id);

			/* set guilty */
			drm_sched_increase_karma(s_job);
retry:
			/* do hw reset */
			if (amdgpu_sriov_vf(adev)) {
				amdgpu_virt_fini_data_exchange(adev);
				r = amdgpu_device_reset_sriov(adev, false);
				if (r)
					adev->asic_reset_res = r;
			} else {
				clear_bit(AMDGPU_SKIP_HW_RESET,
					  &reset_context->flags);
				r = amdgpu_do_asic_reset(device_list_handle,
							 reset_context);
				if (r && r == -EAGAIN)
					goto retry;
			}

			/*
			 * add reset counter so that the following
			 * resubmitted job could flush vmid
			 */
			atomic_inc(&adev->gpu_reset_counter);
			continue;
		}

		/* got the hw fence, signal finished fence */
		atomic_dec(ring->sched.score);
		dma_fence_get(&s_job->s_fence->finished);
		dma_fence_signal(&s_job->s_fence->finished);
		dma_fence_put(&s_job->s_fence->finished);

		/* remove node from list and free the job */
		spin_lock(&ring->sched.job_list_lock);
		list_del_init(&s_job->list);
		spin_unlock(&ring->sched.job_list_lock);
		ring->sched.ops->free_job(s_job);
	}
}

/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu_device pointer
 * @job: which job triggered the hang
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempt to do soft-reset or full-reset and reinitialize the ASIC.
 * Returns 0 for success or an error on failure.
 */

int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job)
{
	struct list_head device_list, *device_list_handle = NULL;
	bool job_signaled = false;
	struct amdgpu_hive_info *hive = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	int i, r = 0;
	bool need_emergency_restart = false;
	bool audio_suspended = false;
	int tmp_vram_lost_counter;
	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	/*
	 * Special case: RAS triggered and full reset isn't supported
	 */
	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);

	/*
	 * Flush RAM to disk so that after reboot
	 * the user can read log and see why the system rebooted.
	 */
	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
		DRM_WARN("Emergency reboot.");

		ksys_sync_helper();
		emergency_restart();
	}

	dev_info(adev->dev, "GPU %s begin!\n",
		need_emergency_restart ? "jobs stop":"reset");

	/*
	 * Here we trylock to avoid chain of resets executing from
	 * either trigger by jobs on different adevs in XGMI hive or jobs on
	 * different schedulers for same device while this TO handler is running.
	 * We always reset all schedulers for device and all devices for XGMI
	 * hive so that should take care of them too.
	 */
	hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
			DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
				job ? job->base.id : -1, hive->hive_id);
			amdgpu_put_xgmi_hive(hive);
			if (job)
				drm_sched_increase_karma(&job->base);
			return 0;
		}
		mutex_lock(&hive->hive_lock);
	}

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	reset_context.job = job;
	reset_context.hive = hive;
	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	/*
	 * Lock the device before we try to operate the linked list.
	 * If we didn't get the device lock, don't touch the linked list
	 * since others may be iterating it.
	 */
	r = amdgpu_device_lock_hive_adev(adev, hive);
	if (r) {
		dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
			 job ? job->base.id : -1);

		/* even though we skip this reset, the job still needs to be set as guilty */
		if (job)
			drm_sched_increase_karma(&job->base);
		goto skip_recovery;
	}

	/*
	 * Build list of devices to reset.
	 * In case we are in XGMI hive mode, resort the device list
	 * to put adev in the 1st position.
	 */
	INIT_LIST_HEAD(&device_list);
	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
			list_add_tail(&tmp_adev->reset_list, &device_list);
		if (!list_is_first(&adev->reset_list, &device_list))
			list_rotate_to_front(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	} else {
		list_add_tail(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	}

	/* block all schedulers and reset given job's ring */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		/*
		 * Try to put the audio codec into suspend state
		 * before gpu reset started.
		 *
		 * Because the power domain of the graphics device
		 * is shared with the AZ power domain, without this
		 * we may change the audio hardware from behind
		 * the audio driver's back. That will trigger
		 * some audio codec errors.
		 */
		if (!amdgpu_device_suspend_display_audio(tmp_adev))
			audio_suspended = true;

		amdgpu_ras_set_error_query_ready(tmp_adev, false);

		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);

		if (!amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_pre_reset(tmp_adev);

		/*
		 * Mark these ASICs as untracked first,
		 * and add them back after reset completes.
		 */
		amdgpu_unregister_gpu_instance(tmp_adev);

		amdgpu_fbdev_set_suspend(tmp_adev, 1);

		/* disable ras on ALL IPs */
		if (!need_emergency_restart &&
		    amdgpu_device_ip_need_full_reset(tmp_adev))
			amdgpu_ras_suspend(tmp_adev);

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, job ? &job->base : NULL);

			if (need_emergency_restart)
				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
		}
		atomic_inc(&tmp_adev->gpu_reset_counter);
	}

	if (need_emergency_restart)
		goto skip_sched_resume;

	/*
	 * Must check guilty signal here since after this point all old
	 * HW fences are force signaled.
	 *
	 * job->base holds a reference to parent fence
	 */
	if (job && job->base.s_fence->parent &&
	    dma_fence_is_signaled(job->base.s_fence->parent)) {
		job_signaled = true;
		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
		goto skip_hw_reset;
	}

retry:	/* Rest of adevs pre asic reset from XGMI hive. */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
		/*TODO Should we stop ?*/
		if (r) {
			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
				r, adev_to_drm(tmp_adev)->unique);
			tmp_adev->asic_reset_res = r;
		}
	}

	tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
	/* Actual ASIC resets if needed.*/
	/* TODO Implement XGMI hive reset logic for SRIOV */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_device_reset_sriov(adev, job ? false : true);
		if (r)
			adev->asic_reset_res = r;
	} else {
		r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
		if (r && r == -EAGAIN)
			goto retry;
	}

skip_hw_reset:

	/* Post ASIC reset for all devs .*/
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {

		/*
		 * Sometimes a later bad compute job can block a good gfx job,
		 * as the gfx and compute rings share internal GC hardware.
		 * We add an additional guilty-jobs recheck step to find the
		 * real guilty job: it synchronously submits and waits for the
		 * first job to be signaled. If that times out, we identify it
		 * as the real guilty job.
		 */
		if (amdgpu_gpu_recovery == 2 &&
		    !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
			amdgpu_device_recheck_guilty_jobs(
				tmp_adev, device_list_handle, &reset_context);

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* No point to resubmit jobs if we didn't HW reset*/
			if (!tmp_adev->asic_reset_res && !job_signaled)
				drm_sched_resubmit_jobs(&ring->sched);

			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
		}

		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
		}

		tmp_adev->asic_reset_res = 0;

		if (r) {
			/* bad news, how to tell it to userspace ? */
			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
		} else {
			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
				DRM_WARN("smart shift update failed\n");
		}
	}

skip_sched_resume:
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		/* unlock kfd: SRIOV would do it separately */
		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_post_reset(tmp_adev);

		/* kfd_post_reset will do nothing if kfd device is not initialized,
		 * so bring up kfd here if it wasn't initialized before.
		 */
		if (!adev->kfd.init_complete)
			amdgpu_amdkfd_device_init(adev);

		if (audio_suspended)
			amdgpu_device_resume_display_audio(tmp_adev);
		amdgpu_device_unlock_adev(tmp_adev);
	}

skip_recovery:
	if (hive) {
		atomic_set(&hive->in_reset, 0);
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}

	if (r && r != -EAGAIN)
		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
	return r;
}
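
/*
 * Illustrative sketch, not part of the driver: roughly how a job timeout
 * (TDR) handler would hand a hung job to amdgpu_device_gpu_recover(); the
 * real handler lives in amdgpu_job.c and carries more checks. The helper
 * name is hypothetical.
 */
static inline void amdgpu_example_on_job_timeout(struct amdgpu_ring *ring,
						 struct amdgpu_job *job)
{
	if (amdgpu_device_should_recover_gpu(ring->adev))
		amdgpu_device_gpu_recover(ring->adev, job);
}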

/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	struct pci_dev *pdev;
	enum pci_bus_speed speed_cap, platform_speed_cap;
	enum pcie_link_width platform_link_width;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
		return;

	pcie_bandwidth_available(adev->pdev, NULL,
				 &platform_speed_cap, &platform_link_width);

	if (adev->pm.pcie_gen_mask == 0) {
		/* asic caps */
		pdev = adev->pdev;
		speed_cap = pcie_get_speed_cap(pdev);
		if (speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
		} else {
			if (speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
		/* platform caps */
		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
		} else {
			if (platform_speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
		} else {
			switch (platform_link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
	}
}
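
/*
 * Illustrative sketch, not part of the driver: the amdgpu_pcie_gen_cap and
 * amdgpu_pcie_lane_cap module parameters short-circuit the probing above by
 * pre-seeding the masks. The helper name and the Gen3 x8 example values are
 * hypothetical.
 */
static inline void amdgpu_example_force_pcie_caps(struct amdgpu_device *adev)
{
	adev->pm.pcie_gen_mask = CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
				 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
				 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3;
	adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
				 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
				 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
				 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
}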

int amdgpu_device_baco_enter(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
		return -ENOTSUPP;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	return amdgpu_dpm_baco_enter(adev);
}

int amdgpu_device_baco_exit(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
		return -ENOTSUPP;

	ret = amdgpu_dpm_baco_exit(adev);
	if (ret)
		return ret;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}
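
/*
 * Illustrative sketch, not part of the driver: BACO entry and exit are
 * paired (e.g. by runtime PM), and the doorbell-interrupt toggling above is
 * symmetric around that pair. The helper name is hypothetical.
 */
static inline int amdgpu_example_baco_cycle(struct drm_device *dev)
{
	int r;

	r = amdgpu_device_baco_enter(dev);
	if (r)
		return r;

	/* ... device stays in BACO until a wake event ... */

	return amdgpu_device_baco_exit(dev);
}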

static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		cancel_delayed_work_sync(&ring->sched.work_tdr);
	}
}

/**
 * amdgpu_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
 */
pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		DRM_WARN("No support for XGMI hive yet...");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	/* Fatal error, prepare for slot reset */
	case pci_channel_io_frozen:
		/*
		 * Cancel and wait for all TDRs in progress if failing to
		 * set adev->in_gpu_reset in amdgpu_device_lock_adev
		 *
		 * Locking adev->reset_sem will prevent any external access
		 * to GPU during PCI error recovery
		 */
		while (!amdgpu_device_lock_adev(adev, NULL))
			amdgpu_cancel_all_tdr(adev);

		/*
		 * Block any work scheduling as we do for regular GPU reset
		 * for the duration of the recovery
		 */
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, NULL);
		}
		atomic_inc(&adev->gpu_reset_counter);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
 * @pdev: pointer to PCI device
 */
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{

	DRM_INFO("PCI error: mmio enabled callback!!\n");

	/* TODO - dump whatever for debugging purposes */

	/* This is called only if amdgpu_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, no need to reset slot.
	 */

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r, i;
	struct amdgpu_reset_context reset_context;
	u32 memsize;
	struct list_head device_list;

	DRM_INFO("PCI error: slot reset callback!!\n");

	memset(&reset_context, 0, sizeof(reset_context));

	INIT_LIST_HEAD(&device_list);
	list_add_tail(&adev->reset_list, &device_list);

	/* wait for asic to come out of reset */
	msleep(500);

	/* Restore PCI confspace */
	amdgpu_device_load_pci_state(pdev);

	/* confirm ASIC came out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		memsize = amdgpu_asic_get_config_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
	if (memsize == 0xffffffff) {
		r = -ETIME;
		goto out;
	}

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);

	adev->no_hw_access = true;
	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
	adev->no_hw_access = false;
	if (r)
		goto out;

	r = amdgpu_do_asic_reset(&device_list, &reset_context);

out:
	if (!r) {
		if (amdgpu_device_cache_pci_state(adev->pdev))
			pci_restore_state(adev->pdev);

		DRM_INFO("PCIe error recovery succeeded\n");
	} else {
		DRM_ERROR("PCIe error recovery failed, err:%d", r);
		amdgpu_device_unlock_adev(adev);
	}

	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

/**
 * amdgpu_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that it's OK to
 * resume normal operation.
 */
void amdgpu_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: resume callback!!\n");

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		drm_sched_resubmit_jobs(&ring->sched);
		drm_sched_start(&ring->sched, true);
	}

	amdgpu_device_unlock_adev(adev);
}

bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pci_save_state(pdev);
	if (!r) {
		kfree(adev->pci_state);

		adev->pci_state = pci_store_saved_state(pdev);

		if (!adev->pci_state) {
			DRM_ERROR("Failed to store PCI saved state");
			return false;
		}
	} else {
		DRM_WARN("Failed to save PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (!adev->pci_state)
		return false;

	r = pci_load_saved_state(pdev, adev->pci_state);
	if (!r) {
		pci_restore_state(pdev);
	} else {
		DRM_WARN("Failed to load PCI state, err:%d\n", r);
		return false;
	}

	return true;
}
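
/*
 * Illustrative sketch, not part of the driver: the cache/load pair brackets
 * an ASIC reset (see the mode1 reset tail above), so PCI config space lost
 * across the reset can be restored from the cached copy. The helper name is
 * hypothetical.
 */
static inline void amdgpu_example_reset_with_pci_state(struct amdgpu_device *adev)
{
	amdgpu_device_cache_pci_state(adev->pdev);	/* snapshot before reset */

	/* ... perform the ASIC reset here ... */

	amdgpu_device_load_pci_state(adev->pdev);	/* restore afterwards */
}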

void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
		struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU)
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	if (ring && ring->funcs->emit_hdp_flush)
		amdgpu_ring_emit_hdp_flush(ring);
	else
		amdgpu_asic_flush_hdp(adev, ring);
}

void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
		struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU)
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	amdgpu_asic_invalidate_hdp(adev, ring);
}
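
/*
 * Illustrative sketch, not part of the driver, under the usual HDP
 * assumption: flush after CPU writes through the HDP aperture so the GPU
 * observes them, and invalidate before the CPU reads data the GPU wrote.
 * The helper name is hypothetical.
 */
static inline void amdgpu_example_hdp_cpu_write(struct amdgpu_device *adev)
{
	/* ... CPU writes to VRAM through the HDP aperture here ... */

	amdgpu_device_flush_hdp(adev, NULL);	/* no ring: takes the ASIC path */
}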