/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>
#include <linux/apple-gmux.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>

#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif

#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
#endif
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000
#define AMDGPU_MAX_RETRY_LIMIT		2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)

static const struct drm_driver amdgpu_kms_driver;

const char *amdgpu_asic_name[] = {
/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */
static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

        return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, 0444,
                   amdgpu_device_get_pcie_replay_count, NULL);
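
/*
 * Illustrative sketch (not part of the driver): a minimal userspace reader
 * for the attribute above. The sysfs path assumes card0; adjust for the
 * actual card index.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long long cnt;
 *		FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");
 *
 *		if (!f || fscanf(f, "%llu", &cnt) != 1)
 *			return 1;
 *		printf("PCIe replays: %llu\n", cnt);
 *		fclose(f);
 *		return 0;
 *	}
 */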
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
                return true;
        return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        if (adev->has_pr3 ||
            ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
                return true;
        return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise return false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * smart shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
        return (amdgpu_device_supports_boco(dev) &&
                amdgpu_acpi_is_power_shift_control_supported());
}
/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size, the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
                             void *buf, size_t size, bool write)
{
        unsigned long flags;
        uint32_t hi = ~0, tmp = 0;
        uint32_t *data = buf;
        uint64_t last;
        int idx;

        if (!drm_dev_enter(adev_to_drm(adev), &idx))
                return;

        BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

        spin_lock_irqsave(&adev->mmio_idx_lock, flags);
        for (last = pos + size; pos < last; pos += 4) {
                tmp = pos >> 31;

                WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
                if (tmp != hi) {
                        WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
                        hi = tmp;
                }
                if (write)
                        WREG32_NO_KIQ(mmMM_DATA, *data++);
                else
                        *data++ = RREG32_NO_KIQ(mmMM_DATA);
        }
        spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        drm_dev_exit(idx);
}
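
/*
 * Minimal usage sketch (illustrative, not from the original file): copy a
 * dword-aligned scratch buffer into VRAM through the MM_INDEX/MM_DATA
 * window, then read it back. "vram_offset" is a hypothetical, 4-byte
 * aligned VRAM address.
 */
#if 0
static void example_mm_write(struct amdgpu_device *adev, loff_t vram_offset)
{
        uint32_t pattern[4] = { 0xdeadbeef, 0xcafebabe, 0x0, 0x1 };

        /* write 16 bytes to VRAM, then read them back for verification */
        amdgpu_device_mm_access(adev, vram_offset, pattern,
                                sizeof(pattern), true);
        amdgpu_device_mm_access(adev, vram_offset, pattern,
                                sizeof(pattern), false);
}
#endif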
/**
 * amdgpu_device_aper_access - access vram by vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size, the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * The return value means how many bytes have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
                                 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
        void __iomem *addr;
        size_t count = 0;
        uint64_t last;

        if (!adev->mman.aper_base_kaddr)
                return 0;

        last = min(pos + size, adev->gmc.visible_vram_size);
        if (last > pos) {
                addr = adev->mman.aper_base_kaddr + pos;
                count = last - pos;

                if (write) {
                        memcpy_toio(addr, buf, count);
                        /* Make sure HDP write cache flush happens without any reordering
                         * after the system memory contents are sent over PCIe device
                         */
                        mb();
                        amdgpu_device_flush_hdp(adev, NULL);
                } else {
                        amdgpu_device_invalidate_hdp(adev, NULL);
                        /* Make sure HDP read cache is invalidated before issuing a read
                         * to the PCIe device
                         */
                        mb();
                        memcpy_fromio(buf, addr, count);
                }
        }

        return count;
#else
        return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size, the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
                               void *buf, size_t size, bool write)
{
        size_t count;

        /* try to use the vram aperture to access vram first */
        count = amdgpu_device_aper_access(adev, pos, buf, size, write);
        size -= count;
        if (size) {
                /* use MM to access the rest of vram */
                pos += count;
                buf += count;
                amdgpu_device_mm_access(adev, pos, buf, size, write);
        }
}
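
/*
 * Illustrative sketch (not from the original file): dump the first 256
 * bytes of VRAM into a stack buffer using the combined helper above. The
 * helper transparently prefers the CPU-visible aperture and falls back to
 * the MM_INDEX/MM_DATA path for the remainder.
 */
#if 0
static void example_vram_dump(struct amdgpu_device *adev)
{
        uint32_t data[64];

        amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
        print_hex_dump(KERN_INFO, "vram: ", DUMP_PREFIX_OFFSET,
                       16, 4, data, sizeof(data), false);
}
#endif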
/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
        if (adev->no_hw_access)
                return true;

#ifdef CONFIG_LOCKDEP
        /*
         * This is a bit complicated to understand, so worth a comment. What we assert
         * here is that the GPU reset is not running on another thread in parallel.
         *
         * For this we trylock the read side of the reset semaphore, if that succeeds
         * we know that the reset is not running in parallel.
         *
         * If the trylock fails we assert that we are either already holding the read
         * side of the lock or are the reset thread itself and hold the write side of
         * the lock.
         */
        if (in_task()) {
                if (down_read_trylock(&adev->reset_domain->sem))
                        up_read(&adev->reset_domain->sem);
                else
                        lockdep_assert_held(&adev->reset_domain->sem);
        }
#endif
        return false;
}
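
/*
 * The trylock/assert pattern above generalizes to any rwsem-protected
 * critical state. A minimal standalone sketch, assuming a hypothetical
 * "my_state_lock" rwsem guarding hardware state:
 */
#if 0
static DECLARE_RWSEM(my_state_lock);

static void assert_no_concurrent_writer(void)
{
        /* Either nobody holds the write side right now... */
        if (down_read_trylock(&my_state_lock))
                up_read(&my_state_lock);
        else
                /* ...or the current thread itself must hold the lock. */
                lockdep_assert_held(&my_state_lock);
}
#endif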
/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
                            uint32_t reg, uint32_t acc_flags)
{
        uint32_t ret;

        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if ((reg * 4) < adev->rmmio_size) {
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_domain->sem)) {
                        ret = amdgpu_kiq_rreg(adev, reg);
                        up_read(&adev->reset_domain->sem);
                } else {
                        ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
                }
        } else {
                ret = adev->pcie_rreg(adev, reg * 4);
        }

        trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

        return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if (offset < adev->rmmio_size)
                return (readb(adev->rmmio + offset));
        BUG();
}

/*
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (offset < adev->rmmio_size)
                writeb(value, adev->rmmio + offset);
        else
                BUG();
}
/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
                        uint32_t reg, uint32_t v,
                        uint32_t acc_flags)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if ((reg * 4) < adev->rmmio_size) {
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_domain->sem)) {
                        amdgpu_kiq_wreg(adev, reg, v);
                        up_read(&adev->reset_domain->sem);
                } else {
                        writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
                }
        } else {
                adev->pcie_wreg(adev, reg * 4, v);
        }

        trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

/**
 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 * @xcc_id: xcc accelerated compute core id
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
                             uint32_t reg, uint32_t v,
                             uint32_t xcc_id)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (amdgpu_sriov_fullaccess(adev) &&
            adev->gfx.rlc.funcs &&
            adev->gfx.rlc.funcs->is_rlcg_access_range) {
                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
                        return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
        } else if ((reg * 4) >= adev->rmmio_size) {
                adev->pcie_wreg(adev, reg * 4, v);
        } else {
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        }
}
/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
                                u32 reg_addr)
{
        unsigned long flags, pcie_index, pcie_data;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;
        u32 r;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        r = readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}
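
/*
 * The index/data pair above is a common MMIO idiom: write the target
 * address to an "index" register, flush the posted write with a dummy
 * read, then access the "data" register. A minimal generic sketch,
 * assuming hypothetical IDX/DATA window offsets into a mapped BAR:
 */
#if 0
#define EXAMPLE_IDX_OFFSET	0x38
#define EXAMPLE_DATA_OFFSET	0x3c

static u32 example_indexed_read(void __iomem *mmio, u32 reg_addr)
{
        writel(reg_addr, mmio + EXAMPLE_IDX_OFFSET);
        readl(mmio + EXAMPLE_IDX_OFFSET);	/* flush posted write */
        return readl(mmio + EXAMPLE_DATA_OFFSET);
}
#endif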
u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
                                    u64 reg_addr)
{
        unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
        u32 r;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_index_hi_offset;
        void __iomem *pcie_data_offset;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
        if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
                pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
        else
                pcie_index_hi = 0;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
        if (pcie_index_hi != 0)
                pcie_index_hi_offset = (void __iomem *)adev->rmmio +
                                pcie_index_hi * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        if (pcie_index_hi != 0) {
                writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }
        r = readl(pcie_data_offset);

        /* clear the high bits */
        if (pcie_index_hi != 0) {
                writel(0, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }

        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}
/**
 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
                                  u32 reg_addr)
{
        unsigned long flags, pcie_index, pcie_data;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;
        u64 r;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        /* read low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        r = readl(pcie_data_offset);
        /* read high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        r |= ((u64)readl(pcie_data_offset) << 32);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}
u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
                                      u64 reg_addr)
{
        unsigned long flags, pcie_index, pcie_data;
        unsigned long pcie_index_hi = 0;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_index_hi_offset;
        void __iomem *pcie_data_offset;
        u64 r;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
        if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
                pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
        if (pcie_index_hi != 0)
                pcie_index_hi_offset = (void __iomem *)adev->rmmio +
                                pcie_index_hi * 4;

        /* read low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        if (pcie_index_hi != 0) {
                writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }
        r = readl(pcie_data_offset);
        /* read high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        if (pcie_index_hi != 0) {
                writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }
        r |= ((u64)readl(pcie_data_offset) << 32);

        /* clear the high bits */
        if (pcie_index_hi != 0) {
                writel(0, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }

        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}
/**
 * amdgpu_device_indirect_wreg - write an indirect register address
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
                                 u32 reg_addr, u32 reg_data)
{
        unsigned long flags, pcie_index, pcie_data;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        writel(reg_data, pcie_data_offset);
        readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
                                     u64 reg_addr, u32 reg_data)
{
        unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_index_hi_offset;
        void __iomem *pcie_data_offset;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
        if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
                pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
        else
                pcie_index_hi = 0;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
        if (pcie_index_hi != 0)
                pcie_index_hi_offset = (void __iomem *)adev->rmmio +
                                pcie_index_hi * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        if (pcie_index_hi != 0) {
                writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }
        writel(reg_data, pcie_data_offset);
        readl(pcie_data_offset);

        /* clear the high bits */
        if (pcie_index_hi != 0) {
                writel(0, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }

        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
/**
 * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register address
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
                                   u32 reg_addr, u64 reg_data)
{
        unsigned long flags, pcie_index, pcie_data;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        /* write low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
        readl(pcie_data_offset);
        /* write high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        writel((u32)(reg_data >> 32), pcie_data_offset);
        readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
                                       u64 reg_addr, u64 reg_data)
{
        unsigned long flags, pcie_index, pcie_data;
        unsigned long pcie_index_hi = 0;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_index_hi_offset;
        void __iomem *pcie_data_offset;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
        if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
                pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
        if (pcie_index_hi != 0)
                pcie_index_hi_offset = (void __iomem *)adev->rmmio +
                                pcie_index_hi * 4;

        /* write low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        if (pcie_index_hi != 0) {
                writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }
        writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
        readl(pcie_data_offset);
        /* write high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        if (pcie_index_hi != 0) {
                writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }
        writel((u32)(reg_data >> 32), pcie_data_offset);
        readl(pcie_data_offset);

        /* clear the high bits */
        if (pcie_index_hi != 0) {
                writel(0, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }

        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
/**
 * amdgpu_device_get_rev_id - query device rev_id
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the device rev_id
 */
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
{
        return adev->nbio.funcs->get_rev_id(adev);
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
        BUG();
        return 0;
}

static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
                  reg, v);
        BUG();
}

static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy 64 bit register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
        BUG();
        return 0;
}

static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy 64 bit register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
        DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
                  reg, v);
        BUG();
}

static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
{
        DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
                                          uint32_t block, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
                  reg, block);
        BUG();
        return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
                                      uint32_t block,
                                      uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
                  reg, block, v);
        BUG();
}
/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
        int ret;

        amdgpu_asic_pre_asic_init(adev);

        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
            amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
                amdgpu_psp_wait_for_bootloader(adev);
                ret = amdgpu_atomfirmware_asic_init(adev, true);
                return ret;
        } else {
                return amdgpu_atom_asic_init(adev->mode_info.atom_context);
        }

        return 0;
}

/**
 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
{
        return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
                                       AMDGPU_GEM_DOMAIN_VRAM |
                                       AMDGPU_GEM_DOMAIN_GTT,
                                       &adev->mem_scratch.robj,
                                       &adev->mem_scratch.gpu_addr,
                                       (void **)&adev->mem_scratch.ptr);
}

/**
 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
}
/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
                                             const u32 *registers,
                                             const u32 array_size)
{
        u32 tmp, reg, and_mask, or_mask;
        int i;

        if (array_size % 3)
                return;

        for (i = 0; i < array_size; i += 3) {
                reg = registers[i + 0];
                and_mask = registers[i + 1];
                or_mask = registers[i + 2];

                if (and_mask == 0xffffffff) {
                        tmp = or_mask;
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~and_mask;
                        if (adev->family >= AMDGPU_FAMILY_AI)
                                tmp |= (or_mask & and_mask);
                        else
                                tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }
}
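
/*
 * Illustrative sketch (not from the original file): the register array is
 * consumed as {offset, and_mask, or_mask} triples. An and_mask of
 * 0xffffffff overwrites the register with or_mask; otherwise the masked
 * bits are cleared and or_mask is merged in. The offsets below are made
 * up for the example.
 */
#if 0
static const u32 example_golden_settings[] = {
        /* reg,   and_mask,   or_mask */
        0x1234, 0xffffffff, 0x00000001,	/* full overwrite */
        0x5678, 0x0000ff00, 0x00001200,	/* update bits 8-15 only */
};

static void example_apply_golden(struct amdgpu_device *adev)
{
        amdgpu_device_program_register_sequence(adev,
                                                example_golden_settings,
                                                ARRAY_SIZE(example_golden_settings));
}
#endif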
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
        pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
        return pci_reset_function(adev->pdev);
}
/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
        if (adev->wb.wb_obj) {
                amdgpu_bo_free_kernel(&adev->wb.wb_obj,
                                      &adev->wb.gpu_addr,
                                      (void **)&adev->wb.wb);
                adev->wb.wb_obj = NULL;
        }
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->wb.wb_obj == NULL) {
                /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
                r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
                                            PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                            &adev->wb.wb_obj, &adev->wb.gpu_addr,
                                            (void **)&adev->wb.wb);
                if (r) {
                        dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }

                adev->wb.num_wb = AMDGPU_MAX_WB;
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));

                /* clear wb memory */
                memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
        }

        return 0;
}
/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
        unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

        if (offset < adev->wb.num_wb) {
                __set_bit(offset, adev->wb.used);
                *wb = offset << 3; /* convert to dw offset */
                return 0;
        } else {
                return -EINVAL;
        }
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
        wb >>= 3;
        if (wb < adev->wb.num_wb)
                __clear_bit(wb, adev->wb.used);
}
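
/*
 * Minimal usage sketch (illustrative, not from the original file): reserve
 * a writeback slot, touch its CPU view, and release it again. Each slot is
 * 256 bits wide, and the returned index is in dwords.
 */
#if 0
static int example_wb_usage(struct amdgpu_device *adev)
{
        u32 wb;
        int r;

        r = amdgpu_device_wb_get(adev, &wb);
        if (r)
                return r;

        /* CPU view of the slot, indexed in dwords */
        adev->wb.wb[wb] = 0;
        /* adev->wb.gpu_addr + wb * 4 would be handed to an engine as a
         * fence/rptr target
         */

        amdgpu_device_wb_free(adev, wb);
        return 0;
}
#endif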
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
        int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
        struct pci_bus *root;
        struct resource *res;
        unsigned int i;
        u16 cmd;
        int r = 0;

        if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
                return 0;

        if (amdgpu_sriov_vf(adev))
                return 0;

        /* skip if the bios has already enabled large BAR */
        if (adev->gmc.real_vram_size &&
            (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
                return 0;

        /* Check if the root BUS has 64bit memory resources */
        root = adev->pdev->bus;
        while (root->parent)
                root = root->parent;

        pci_bus_for_each_resource(root, res, i) {
                if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
                    res->start > 0x100000000ull)
                        break;
        }

        /* Trying to resize is pointless without a root hub window above 4GB */
        if (!res)
                return 0;

        /* Limit the BAR size to what is available */
        rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
                        rbar_size);

        /* Disable memory decoding while we change the BAR addresses and size */
        pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
        pci_write_config_word(adev->pdev, PCI_COMMAND,
                              cmd & ~PCI_COMMAND_MEMORY);

        /* Free the VRAM and doorbell BAR, we most likely need to move both. */
        amdgpu_doorbell_fini(adev);
        if (adev->asic_type >= CHIP_BONAIRE)
                pci_release_resource(adev->pdev, 2);

        pci_release_resource(adev->pdev, 0);

        r = pci_resize_resource(adev->pdev, 0, rbar_size);
        if (r == -ENOSPC)
                DRM_INFO("Not enough PCI address space for a large BAR.");
        else if (r && r != -ENOTSUPP)
                DRM_ERROR("Problem resizing BAR0 (%d).", r);

        pci_assign_unassigned_bus_resources(adev->pdev->bus);

        /* When the doorbell or fb BAR isn't available we have no chance of
         * using the device.
         */
        r = amdgpu_doorbell_init(adev);
        if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
                return -ENODEV;

        pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

        return 0;
}
static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
{
        if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
                return false;

        return true;
}

/*
 * GPU helpers function.
 */
/**
 * amdgpu_device_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
        uint32_t reg;

        if (amdgpu_sriov_vf(adev))
                return false;

        if (!amdgpu_device_read_bios(adev))
                return false;

        if (amdgpu_passthrough(adev)) {
                /* For FIJI: In the whole-GPU pass-through virtualization case, after
                 * a VM reboot some old SMC firmware still needs the driver to do a
                 * vPost, otherwise the GPU hangs. SMC firmware versions above 22.15
                 * don't have this flaw, so we force vPost for SMC versions below
                 * 22.15.
                 */
                if (adev->asic_type == CHIP_FIJI) {
                        int err;
                        uint32_t fw_ver;

                        err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
                        /* force vPost if an error occurred */
                        if (err)
                                return true;

                        fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
                        if (fw_ver < 0x00160e00)
                                return true;
                }
        }

        /* Don't post if we need to reset whole hive on init */
        if (adev->gmc.xgmi.pending_reset)
                return false;

        if (adev->has_hw_reset) {
                adev->has_hw_reset = false;
                return true;
        }

        /* bios scratch used on CIK+ */
        if (adev->asic_type >= CHIP_BONAIRE)
                return amdgpu_atombios_scratch_need_asic_init(adev);

        /* check MEM_SIZE for older asics */
        reg = amdgpu_asic_get_config_memsize(adev);

        if ((reg != 0) && (reg != 0xffffffff))
                return false;

        return true;
}
/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on DCE 3.0 or later.
 * If users report that it works on older ASICs as well, we may
 * loosen this.
 */
bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
{
        switch (amdgpu_seamless) {
        case -1:
                break;
        case 1:
                return true;
        case 0:
                return false;
        default:
                DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
                          amdgpu_seamless);
                return false;
        }

        if (!(adev->flags & AMD_IS_APU))
                return false;

        if (adev->mman.keep_stolen_vga_memory)
                return false;

        return adev->ip_versions[DCE_HWIP][0] >= IP_VERSION(3, 0, 0);
}
/*
 * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
 * speed switching. Until we have confirmation from Intel that a specific host
 * supports it, it's safer that we keep it disabled for all.
 *
 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
 */
bool amdgpu_device_pcie_dynamic_switching_supported(void)
{
#if IS_ENABLED(CONFIG_X86)
        struct cpuinfo_x86 *c = &cpu_data(0);

        if (c->x86_vendor == X86_VENDOR_INTEL)
                return false;
#endif
        return true;
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
 * @adev: amdgpu_device pointer
 *
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 *
 * Returns true if it should be used or false if not.
 */
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
        switch (amdgpu_aspm) {
        case -1:
                break;
        case 0:
                return false;
        case 1:
                return true;
        default:
                return false;
        }
        return pcie_aspm_enabled(adev->pdev);
}

bool amdgpu_device_aspm_support_quirk(void)
{
#if IS_ENABLED(CONFIG_X86)
        struct cpuinfo_x86 *c = &cpu_data(0);

        return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
#else
        return true;
#endif
}
/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
                bool state)
{
        struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

        amdgpu_asic_set_vga_state(adev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory; a page is 4KB, so we have 12 bits of offset, a minimum of
 * 9 bits in the page table and the remaining bits in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory
         */
        if (amdgpu_vm_block_size == -1)
                return;

        if (amdgpu_vm_block_size < 9) {
                dev_warn(adev->dev, "VM page table size (%d) too small\n",
                         amdgpu_vm_block_size);
                amdgpu_vm_block_size = -1;
        }
}
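
/*
 * Worked example (not from the original file): with the minimum
 * amdgpu_vm_block_size of 9, one page table holds 2^9 = 512 entries, and
 * with 4KB (2^12 byte) pages each page table therefore maps
 * 2^(9 + 12) = 2MB of address space; every extra bit in the block size
 * doubles the coverage per page table.
 */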
/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
        /* no need to check the default value */
        if (amdgpu_vm_size == -1)
                return;

        if (amdgpu_vm_size < 1) {
                dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
                         amdgpu_vm_size);
                amdgpu_vm_size = -1;
        }
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
        struct sysinfo si;
        bool is_os_64 = (sizeof(void *) == 8);
        uint64_t total_memory;
        uint64_t dram_size_seven_GB = 0x1B8000000;
        uint64_t dram_size_three_GB = 0xB8000000;

        if (amdgpu_smu_memory_pool_size == 0)
                return;

        if (!is_os_64) {
                DRM_WARN("Not 64-bit OS, feature not supported\n");
                goto def_value;
        }
        si_meminfo(&si);
        total_memory = (uint64_t)si.totalram * si.mem_unit;

        if ((amdgpu_smu_memory_pool_size == 1) ||
            (amdgpu_smu_memory_pool_size == 2)) {
                if (total_memory < dram_size_three_GB)
                        goto def_value1;
        } else if ((amdgpu_smu_memory_pool_size == 4) ||
                   (amdgpu_smu_memory_pool_size == 8)) {
                if (total_memory < dram_size_seven_GB)
                        goto def_value1;
        } else {
                DRM_WARN("Smu memory pool size not supported\n");
                goto def_value;
        }
        adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

        return;

def_value1:
        DRM_WARN("Not enough system memory\n");
def_value:
        adev->pm.smu_prv_buffer_size = 0;
}
static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
        if (!(adev->flags & AMD_IS_APU) ||
            adev->asic_type < CHIP_RAVEN)
                return 0;

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                if (adev->pdev->device == 0x15dd)
                        adev->apu_flags |= AMD_APU_IS_RAVEN;
                if (adev->pdev->device == 0x15d8)
                        adev->apu_flags |= AMD_APU_IS_PICASSO;
                break;
        case CHIP_RENOIR:
                if ((adev->pdev->device == 0x1636) ||
                    (adev->pdev->device == 0x164c))
                        adev->apu_flags |= AMD_APU_IS_RENOIR;
                else
                        adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
                break;
        case CHIP_VANGOGH:
                adev->apu_flags |= AMD_APU_IS_VANGOGH;
                break;
        case CHIP_YELLOW_CARP:
                break;
        case CHIP_CYAN_SKILLFISH:
                if ((adev->pdev->device == 0x13FE) ||
                    (adev->pdev->device == 0x143F))
                        adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
                break;
        default:
                break;
        }

        return 0;
}
/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
        if (amdgpu_sched_jobs < 4) {
                dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = 4;
        } else if (!is_power_of_2(amdgpu_sched_jobs)) {
                dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
        }

        if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
                /* gart size must be greater or equal to 32M */
                dev_warn(adev->dev, "gart size (%d) too small\n",
                         amdgpu_gart_size);
                amdgpu_gart_size = -1;
        }

        if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
                /* gtt size must be greater or equal to 32M */
                dev_warn(adev->dev, "gtt size (%d) too small\n",
                         amdgpu_gtt_size);
                amdgpu_gtt_size = -1;
        }

        /* valid range is between 4 and 9 inclusive */
        if (amdgpu_vm_fragment_size != -1 &&
            (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
                dev_warn(adev->dev, "valid range is between 4 and 9\n");
                amdgpu_vm_fragment_size = -1;
        }

        if (amdgpu_sched_hw_submission < 2) {
                dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
                         amdgpu_sched_hw_submission);
                amdgpu_sched_hw_submission = 2;
        } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
                dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
                         amdgpu_sched_hw_submission);
                amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
        }

        if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
                dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
                amdgpu_reset_method = -1;
        }

        amdgpu_device_check_smu_prv_buffer_size(adev);

        amdgpu_device_check_vm_size(adev);

        amdgpu_device_check_block_size(adev);

        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

        return 0;
}
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
                                        enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        int r;

        if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
                return;

        if (state == VGA_SWITCHEROO_ON) {
                pr_info("switched on\n");
                /* don't suspend or resume card normally */
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

                pci_set_power_state(pdev, PCI_D0);
                amdgpu_device_load_pci_state(pdev);
                r = pci_enable_device(pdev);
                if (r)
                        DRM_WARN("pci_enable_device failed (%d)\n", r);
                amdgpu_device_resume(dev, true);

                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_info("switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                amdgpu_device_suspend(dev, true);
                amdgpu_device_cache_pci_state(pdev);
                /* Shut down the device */
                pci_disable_device(pdev);
                pci_set_power_state(pdev, PCI_D3cold);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Checks if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        /*
         * FIXME: open_count is protected by drm_global_mutex but that would lead to
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
        return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
        .set_gpu_state = amdgpu_switcheroo_set_state,
        .reprobe = NULL,
        .can_switch = amdgpu_switcheroo_can_switch,
};
/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
                                           enum amd_ip_block_type block_type,
                                           enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = dev;
        int i, r = 0;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type != block_type)
                        continue;
                if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
                        continue;
                r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
                        (void *)adev, state);
                if (r)
                        DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
                                  adev->ip_blocks[i].version->funcs->name, r);
        }
        return r;
}
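
/*
 * Illustrative call sketch (not from the original file): gate the clocks
 * of every GFX IP instance. Any negative return code comes from the last
 * instance that failed.
 */
#if 0
static void example_gate_gfx_clocks(struct amdgpu_device *adev)
{
        int r = amdgpu_device_ip_set_clockgating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_CG_STATE_GATE);
        if (r)
                dev_warn(adev->dev, "GFX clockgating failed (%d)\n", r);
}
#endif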
/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
                                           enum amd_ip_block_type block_type,
                                           enum amd_powergating_state state)
{
        struct amdgpu_device *adev = dev;
        int i, r = 0;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type != block_type)
                        continue;
                if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
                        continue;
                r = adev->ip_blocks[i].version->funcs->set_powergating_state(
                        (void *)adev, state);
                if (r)
                        DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
                                  adev->ip_blocks[i].version->funcs->name, r);
        }
        return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
                                            u64 *flags)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
                        adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
        }
}
/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
                                   enum amd_ip_block_type block_type)
{
        int i, r;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type == block_type) {
                        r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
                        if (r)
                                return r;
                        break;
                }
        }
        return 0;
}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
                              enum amd_ip_block_type block_type)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type == block_type)
                        return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
        }
        return true;
}
/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
                              enum amd_ip_block_type type)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++)
                if (adev->ip_blocks[i].version->type == type)
                        return &adev->ip_blocks[i];

        return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block version is equal or greater,
 * 1 if it is smaller or the ip_block doesn't exist.
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
                                       enum amd_ip_block_type type,
                                       u32 major, u32 minor)
{
        struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

        if (ip_block && ((ip_block->version->major > major) ||
                        ((ip_block->version->major == major) &&
                        (ip_block->version->minor >= minor))))
                return 0;

        return 1;
}
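
/*
 * Illustrative lookup sketch (not from the original file): check whether
 * the GFX IP block is at least version 9.0 before taking a v9-specific
 * code path.
 */
#if 0
static bool example_has_gfx_v9(struct amdgpu_device *adev)
{
        return amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
                                                  9, 0) == 0;
}
#endif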
/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
                               const struct amdgpu_ip_block_version *ip_block_version)
{
        if (!ip_block_version)
                return -EINVAL;

        switch (ip_block_version->type) {
        case AMD_IP_BLOCK_TYPE_VCN:
                if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
                        return 0;
                break;
        case AMD_IP_BLOCK_TYPE_JPEG:
                if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
                        return 0;
                break;
        default:
                break;
        }

        DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
                 ip_block_version->funcs->name);

        adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

        return 0;
}
/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
        adev->enable_virtual_display = false;

        if (amdgpu_virtual_display) {
                const char *pci_address_name = pci_name(adev->pdev);
                char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

                pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
                pciaddstr_tmp = pciaddstr;
                while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
                        pciaddname = strsep(&pciaddname_tmp, ",");
                        if (!strcmp("all", pciaddname)
                            || !strcmp(pci_address_name, pciaddname)) {
                                long num_crtc;
                                int res = -1;

                                adev->enable_virtual_display = true;

                                if (pciaddname_tmp)
                                        res = kstrtol(pciaddname_tmp, 10,
                                                      &num_crtc);

                                if (!res) {
                                        if (num_crtc < 1)
                                                num_crtc = 1;
                                        if (num_crtc > 6)
                                                num_crtc = 6;
                                        adev->mode_info.num_crtc = num_crtc;
                                } else {
                                        adev->mode_info.num_crtc = 1;
                                }
                                break;
                        }
                }

                DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
                         amdgpu_virtual_display, pci_address_name,
                         adev->enable_virtual_display, adev->mode_info.num_crtc);

                kfree(pciaddstr);
        }
}
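
/*
 * Illustrative parameter sketch (not from the original file): the
 * virtual_display string is a semicolon-separated list of
 * "pci_address,num_crtc" entries, e.g. on the kernel command line:
 *
 *	amdgpu.virtual_display=0000:01:00.0,2
 *
 * which enables two virtual CRTCs on the device at 0000:01:00.0
 * (the device address here is made up for the example).
 */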
void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
{
        if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
                adev->mode_info.num_crtc = 1;
                adev->enable_virtual_display = true;
                DRM_INFO("virtual_display:%d, num_crtc:%d\n",
                         adev->enable_virtual_display, adev->mode_info.num_crtc);
        }
}
/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[40];
        int err;
        const struct gpu_info_firmware_header_v1_0 *hdr;

        adev->firmware.gpu_info_fw = NULL;

        if (adev->mman.discovery_bin) {
                /*
                 * FIXME: The bounding box is still needed by Navi12, so
                 * temporarily read it from gpu_info firmware. Should be dropped
                 * when DAL no longer needs it.
                 */
                if (adev->asic_type != CHIP_NAVI12)
                        return 0;
        }

        switch (adev->asic_type) {
        default:
                return 0;
        case CHIP_VEGA10:
                chip_name = "vega10";
                break;
        case CHIP_VEGA12:
                chip_name = "vega12";
                break;
        case CHIP_RAVEN:
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        chip_name = "raven2";
                else if (adev->apu_flags & AMD_APU_IS_PICASSO)
                        chip_name = "picasso";
                else
                        chip_name = "raven";
                break;
        case CHIP_ARCTURUS:
                chip_name = "arcturus";
                break;
        case CHIP_NAVI12:
                chip_name = "navi12";
                break;
        }

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
        err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
        if (err) {
                dev_err(adev->dev,
                        "Failed to get gpu_info firmware \"%s\"\n",
                        fw_name);
                goto out;
        }

        hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
        amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

        switch (hdr->version_major) {
        case 1:
        {
                const struct gpu_info_firmware_v1_0 *gpu_info_fw =
                        (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
                                                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));

                /*
                 * Should be dropped when DAL no longer needs it.
                 */
                if (adev->asic_type == CHIP_NAVI12)
                        goto parse_soc_bounding_box;

                adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
                adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
                adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
                adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
                adev->gfx.config.max_texture_channel_caches =
                        le32_to_cpu(gpu_info_fw->gc_num_tccs);
                adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
                adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
                adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
                adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
                adev->gfx.config.double_offchip_lds_buf =
                        le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
                adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
                adev->gfx.cu_info.max_waves_per_simd =
                        le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
                adev->gfx.cu_info.max_scratch_slots_per_cu =
                        le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
                adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
                if (hdr->version_minor >= 1) {
                        const struct gpu_info_firmware_v1_1 *gpu_info_fw =
                                (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
                                                                        le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        adev->gfx.config.num_sc_per_sh =
                                le32_to_cpu(gpu_info_fw->num_sc_per_sh);
                        adev->gfx.config.num_packer_per_sc =
                                le32_to_cpu(gpu_info_fw->num_packer_per_sc);
                }

parse_soc_bounding_box:
                /*
                 * soc bounding box info is not integrated in discovery table,
                 * we always need to parse it from gpu info firmware if needed.
                 */
                if (hdr->version_minor == 2) {
                        const struct gpu_info_firmware_v1_2 *gpu_info_fw =
                                (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
                                                                        le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
                }
                break;
        }
        default:
                dev_err(adev->dev,
                        "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
                err = -EINVAL;
                goto out;
        }
out:
        return err;
}
2155 * amdgpu_device_ip_early_init - run early init for hardware IPs
2157 * @adev: amdgpu_device pointer
2159 * Early initialization pass for hardware IPs. The hardware IPs that make
2160 * up each asic are discovered each IP's early_init callback is run. This
2161 * is the first stage in initializing the asic.
2162 * Returns 0 on success, negative error code on failure.
2164 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2166 struct drm_device *dev = adev_to_drm(adev);
2167 struct pci_dev *parent;
2171 amdgpu_device_enable_virtual_display(adev);
2173 if (amdgpu_sriov_vf(adev)) {
2174 r = amdgpu_virt_request_full_gpu(adev, true);
2179 switch (adev->asic_type) {
2180 #ifdef CONFIG_DRM_AMDGPU_SI
2186 adev->family = AMDGPU_FAMILY_SI;
2187 r = si_set_ip_blocks(adev);
2192 #ifdef CONFIG_DRM_AMDGPU_CIK
2198 if (adev->flags & AMD_IS_APU)
2199 adev->family = AMDGPU_FAMILY_KV;
2201 adev->family = AMDGPU_FAMILY_CI;
2203 r = cik_set_ip_blocks(adev);
2211 case CHIP_POLARIS10:
2212 case CHIP_POLARIS11:
2213 case CHIP_POLARIS12:
2217 if (adev->flags & AMD_IS_APU)
2218 adev->family = AMDGPU_FAMILY_CZ;
2220 adev->family = AMDGPU_FAMILY_VI;
2222 r = vi_set_ip_blocks(adev);
2227 r = amdgpu_discovery_set_ip_blocks(adev);
2233 if (amdgpu_has_atpx() &&
2234 (amdgpu_is_atpx_hybrid() ||
2235 amdgpu_has_atpx_dgpu_power_cntl()) &&
2236 ((adev->flags & AMD_IS_APU) == 0) &&
2237 !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2238 adev->flags |= AMD_IS_PX;
2240 if (!(adev->flags & AMD_IS_APU)) {
2241 parent = pcie_find_root_port(adev->pdev);
2242 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2246 adev->pm.pp_feature = amdgpu_pp_feature_mask;
2247 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2248 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2249 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2250 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2253 for (i = 0; i < adev->num_ip_blocks; i++) {
2254 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2255 DRM_WARN("disabled ip block: %d <%s>\n",
2256 i, adev->ip_blocks[i].version->funcs->name);
2257 adev->ip_blocks[i].status.valid = false;
2259 if (adev->ip_blocks[i].version->funcs->early_init) {
2260 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2262 adev->ip_blocks[i].status.valid = false;
2264 DRM_ERROR("early_init of IP block <%s> failed %d\n",
2265 adev->ip_blocks[i].version->funcs->name, r);
2268 adev->ip_blocks[i].status.valid = true;
2271 adev->ip_blocks[i].status.valid = true;
2274 /* get the vbios after the asic_funcs are set up */
2275 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2276 r = amdgpu_device_parse_gpu_info_fw(adev);
2281 if (amdgpu_device_read_bios(adev)) {
2282 if (!amdgpu_get_bios(adev))
2285 r = amdgpu_atombios_init(adev);
2287 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2288 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2293 /* get pf2vf msg info at its earliest time */
2294 if (amdgpu_sriov_vf(adev))
2295 amdgpu_virt_init_data_exchange(adev);
2302 amdgpu_amdkfd_device_probe(adev);
2303 adev->cg_flags &= amdgpu_cg_mask;
2304 adev->pg_flags &= amdgpu_pg_mask;
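/*
 * Editorial note: ip_block_mask, cg_mask and pg_mask are amdgpu module
 * parameters, so the filtering above can be driven from the kernel command
 * line. A hypothetical example (bit positions follow the IP block order
 * reported by the DRM_WARN above):
 *
 *	amdgpu.ip_block_mask=0xfffffffd		// mask off IP block 1
 *	amdgpu.cg_mask=0x0			// disable all clockgating
 *	amdgpu.pg_mask=0x0			// disable all powergating
 */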
2309 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2313 for (i = 0; i < adev->num_ip_blocks; i++) {
2314 if (!adev->ip_blocks[i].status.sw)
2316 if (adev->ip_blocks[i].status.hw)
2318 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2319 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2320 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2321 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2323 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2324 adev->ip_blocks[i].version->funcs->name, r);
2327 adev->ip_blocks[i].status.hw = true;
2334 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2338 for (i = 0; i < adev->num_ip_blocks; i++) {
2339 if (!adev->ip_blocks[i].status.sw)
2341 if (adev->ip_blocks[i].status.hw)
2343 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2345 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2346 adev->ip_blocks[i].version->funcs->name, r);
2349 adev->ip_blocks[i].status.hw = true;
2355 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2359 uint32_t smu_version;
2361 if (adev->asic_type >= CHIP_VEGA10) {
2362 for (i = 0; i < adev->num_ip_blocks; i++) {
2363 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2366 if (!adev->ip_blocks[i].status.sw)
2369 /* no need to do the fw loading again if already done */
2370 if (adev->ip_blocks[i].status.hw)
2373 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2374 r = adev->ip_blocks[i].version->funcs->resume(adev);
2376 DRM_ERROR("resume of IP block <%s> failed %d\n",
2377 adev->ip_blocks[i].version->funcs->name, r);
2381 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2383 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2384 adev->ip_blocks[i].version->funcs->name, r);
2389 adev->ip_blocks[i].status.hw = true;
2394 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2395 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2400 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2405 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2406 struct amdgpu_ring *ring = adev->rings[i];
2408 /* No need to set up the GPU scheduler for rings that don't need it */
2409 if (!ring || ring->no_scheduler)
2412 switch (ring->funcs->type) {
2413 case AMDGPU_RING_TYPE_GFX:
2414 timeout = adev->gfx_timeout;
2416 case AMDGPU_RING_TYPE_COMPUTE:
2417 timeout = adev->compute_timeout;
2419 case AMDGPU_RING_TYPE_SDMA:
2420 timeout = adev->sdma_timeout;
2423 timeout = adev->video_timeout;
2427 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2428 ring->num_hw_submission, 0,
2429 timeout, adev->reset_domain->wq,
2430 ring->sched_score, ring->name,
2433 DRM_ERROR("Failed to create scheduler on ring %s.\n",
2439 amdgpu_xcp_update_partition_sched_list(adev);
2446 * amdgpu_device_ip_init - run init for hardware IPs
2448 * @adev: amdgpu_device pointer
2450 * Main initialization pass for hardware IPs. The list of all the hardware
2451 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2452 * are run. sw_init initializes the software state associated with each IP
2453 * and hw_init initializes the hardware associated with each IP.
2454 * Returns 0 on success, negative error code on failure.
2456 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2460 r = amdgpu_ras_init(adev);
2464 for (i = 0; i < adev->num_ip_blocks; i++) {
2465 if (!adev->ip_blocks[i].status.valid)
2467 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2469 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2470 adev->ip_blocks[i].version->funcs->name, r);
2473 adev->ip_blocks[i].status.sw = true;
2475 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2476 /* need to do common hw init early so everything is set up for gmc */
2477 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2479 DRM_ERROR("hw_init %d failed %d\n", i, r);
2482 adev->ip_blocks[i].status.hw = true;
2483 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2484 /* need to do gmc hw init early so we can allocate gpu mem */
2485 /* Try to reserve bad pages early */
2486 if (amdgpu_sriov_vf(adev))
2487 amdgpu_virt_exchange_data(adev);
2489 r = amdgpu_device_mem_scratch_init(adev);
2491 DRM_ERROR("amdgpu_device_mem_scratch_init failed %d\n", r);
2494 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2496 DRM_ERROR("hw_init %d failed %d\n", i, r);
2499 r = amdgpu_device_wb_init(adev);
2501 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2504 adev->ip_blocks[i].status.hw = true;
2506 /* right after GMC hw init, we create CSA */
2507 if (adev->gfx.mcbp) {
2508 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2509 AMDGPU_GEM_DOMAIN_VRAM |
2510 AMDGPU_GEM_DOMAIN_GTT,
2513 DRM_ERROR("allocate CSA failed %d\n", r);
2520 if (amdgpu_sriov_vf(adev))
2521 amdgpu_virt_init_data_exchange(adev);
2523 r = amdgpu_ib_pool_init(adev);
2525 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2526 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2530 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2534 r = amdgpu_device_ip_hw_init_phase1(adev);
2538 r = amdgpu_device_fw_loading(adev);
2542 r = amdgpu_device_ip_hw_init_phase2(adev);
2547 * Retired pages will be loaded from eeprom and reserved here;
2548 * this must be called after amdgpu_device_ip_hw_init_phase2, since
2549 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2550 * functional for I2C communication, which is only true at this point.
2552 * amdgpu_ras_recovery_init may fail, but the caller only cares about
2553 * failures caused by a bad gpu state and stops the amdgpu init process
2554 * accordingly. For other failures it still releases all the resources
2555 * and prints an error message rather than returning a negative value
2556 * to the upper level.
2558 * Note: theoretically, this should be called before all vram allocations
2559 * to protect retired pages from being abused.
2561 r = amdgpu_ras_recovery_init(adev);
2566 * In the case of XGMI, grab an extra reference to the reset domain for this device
2568 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2569 if (amdgpu_xgmi_add_device(adev) == 0) {
2570 if (!amdgpu_sriov_vf(adev)) {
2571 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2573 if (WARN_ON(!hive)) {
2578 if (!hive->reset_domain ||
2579 !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2581 amdgpu_put_xgmi_hive(hive);
2585 /* Drop the early temporary reset domain we created for the device */
2586 amdgpu_reset_put_reset_domain(adev->reset_domain);
2587 adev->reset_domain = hive->reset_domain;
2588 amdgpu_put_xgmi_hive(hive);
2593 r = amdgpu_device_init_schedulers(adev);
2597 /* Don't init kfd if the whole hive needs to be reset during init */
2598 if (!adev->gmc.xgmi.pending_reset) {
2599 kgd2kfd_init_zone_device(adev);
2600 amdgpu_amdkfd_device_init(adev);
2603 amdgpu_fru_get_product_info(adev);
2611 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2613 * @adev: amdgpu_device pointer
2615 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2616 * this function before a GPU reset. If the value is retained after a
2617 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2619 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2621 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
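/*
 * Editorial sketch of how the reset-magic pair is intended to be used
 * together with amdgpu_device_check_vram_lost() below; an illustration,
 * not driver code:
 *
 *	amdgpu_device_fill_reset_magic(adev);	// before the reset
 *	...					// GPU reset happens here
 *	if (amdgpu_device_check_vram_lost(adev)) {
 *		// magic gone: VRAM contents must be treated as lost
 *	}
 */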
2625 * amdgpu_device_check_vram_lost - check if vram is valid
2627 * @adev: amdgpu_device pointer
2629 * Checks the reset magic value written to the gart pointer in VRAM.
2630 * The driver calls this after a GPU reset to see if the contents of
2631 * VRAM were lost or not.
2632 * Returns true if vram is lost, false if not.
2634 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2636 if (memcmp(adev->gart.ptr, adev->reset_magic,
2637 AMDGPU_RESET_MAGIC_NUM))
2640 if (!amdgpu_in_reset(adev))
2644 * For all ASICs with baco/mode1 reset, the VRAM is
2645 * always assumed to be lost.
2647 switch (amdgpu_asic_reset_method(adev)) {
2648 case AMD_RESET_METHOD_BACO:
2649 case AMD_RESET_METHOD_MODE1:
2657 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2659 * @adev: amdgpu_device pointer
2660 * @state: clockgating state (gate or ungate)
2662 * The list of all the hardware IPs that make up the asic is walked and the
2663 * set_clockgating_state callbacks are run.
2664 * On late init, this pass enables clockgating for the hardware IPs;
2665 * on fini or suspend, it disables clockgating for the hardware IPs.
2666 * Returns 0 on success, negative error code on failure.
2669 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2670 enum amd_clockgating_state state)
2674 if (amdgpu_emu_mode == 1)
2677 for (j = 0; j < adev->num_ip_blocks; j++) {
2678 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2679 if (!adev->ip_blocks[i].status.late_initialized)
2681 /* skip CG for GFX, SDMA on S0ix */
2682 if (adev->in_s0ix &&
2683 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2684 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2686 /* skip CG for VCE/UVD, it's handled specially */
2687 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2688 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2689 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2690 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2691 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2692 /* enable clockgating to save power */
2693 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2696 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2697 adev->ip_blocks[i].version->funcs->name, r);
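/*
 * Editorial note: the index arithmetic above walks the IP list forward
 * when gating and backward when ungating, so state is torn down in reverse
 * creation order. For example, with three late-initialized blocks
 * [COMMON, GMC, GFX]:
 *
 *	AMD_CG_STATE_GATE:	i = 0, 1, 2	(COMMON -> GMC -> GFX)
 *	AMD_CG_STATE_UNGATE:	i = 2, 1, 0	(GFX -> GMC -> COMMON)
 */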
2706 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2707 enum amd_powergating_state state)
2711 if (amdgpu_emu_mode == 1)
2714 for (j = 0; j < adev->num_ip_blocks; j++) {
2715 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2716 if (!adev->ip_blocks[i].status.late_initialized)
2718 /* skip PG for GFX, SDMA on S0ix */
2719 if (adev->in_s0ix &&
2720 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2721 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2723 /* skip PG for VCE/UVD, it's handled specially */
2724 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2725 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2726 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2727 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2728 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2729 /* enable powergating to save power */
2730 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2733 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2734 adev->ip_blocks[i].version->funcs->name, r);
2742 static int amdgpu_device_enable_mgpu_fan_boost(void)
2744 struct amdgpu_gpu_instance *gpu_ins;
2745 struct amdgpu_device *adev;
2748 mutex_lock(&mgpu_info.mutex);
2751 * MGPU fan boost feature should be enabled
2752 * only when there are two or more dGPUs in the system.
2755 if (mgpu_info.num_dgpu < 2)
2758 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2759 gpu_ins = &(mgpu_info.gpu_ins[i]);
2760 adev = gpu_ins->adev;
2761 if (!(adev->flags & AMD_IS_APU) &&
2762 !gpu_ins->mgpu_fan_enabled) {
2763 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2767 gpu_ins->mgpu_fan_enabled = 1;
2772 mutex_unlock(&mgpu_info.mutex);
2778 * amdgpu_device_ip_late_init - run late init for hardware IPs
2780 * @adev: amdgpu_device pointer
2782 * Late initialization pass for hardware IPs. The list of all the hardware
2783 * IPs that make up the asic is walked and the late_init callbacks are run.
2784 * late_init covers any special initialization that an IP requires
2785 * after all of them have been initialized or something that needs to happen
2786 * late in the init process.
2787 * Returns 0 on success, negative error code on failure.
2789 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2791 struct amdgpu_gpu_instance *gpu_instance;
2794 for (i = 0; i < adev->num_ip_blocks; i++) {
2795 if (!adev->ip_blocks[i].status.hw)
2797 if (adev->ip_blocks[i].version->funcs->late_init) {
2798 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2800 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2801 adev->ip_blocks[i].version->funcs->name, r);
2805 adev->ip_blocks[i].status.late_initialized = true;
2808 r = amdgpu_ras_late_init(adev);
2810 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2814 amdgpu_ras_set_error_query_ready(adev, true);
2816 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2817 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2819 amdgpu_device_fill_reset_magic(adev);
2821 r = amdgpu_device_enable_mgpu_fan_boost();
2823 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2825 /* For passthrough configurations on arcturus and aldebaran, enable special SBR handling */
2826 if (amdgpu_passthrough(adev) &&
2827 ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2828 adev->asic_type == CHIP_ALDEBARAN))
2829 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2831 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2832 mutex_lock(&mgpu_info.mutex);
2835 * Reset the device p-state to low, as it was booted with the high p-state.
2837 * This should be performed only after all devices from the same
2838 * hive get initialized.
2840 * However, the number of devices in a hive is not known in advance;
2841 * it is counted one by one as the devices initialize.
2843 * So we wait until all XGMI interlinked devices are initialized.
2844 * This may bring some delays as those devices may come from
2845 * different hives. But that should be OK.
2847 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2848 for (i = 0; i < mgpu_info.num_gpu; i++) {
2849 gpu_instance = &(mgpu_info.gpu_ins[i]);
2850 if (gpu_instance->adev->flags & AMD_IS_APU)
2853 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2854 AMDGPU_XGMI_PSTATE_MIN);
2856 DRM_ERROR("pstate setting failed (%d).\n", r);
2862 mutex_unlock(&mgpu_info.mutex);
2869 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2871 * @adev: amdgpu_device pointer
2873 * For ASICs that need to disable the SMC first
2875 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2879 if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
2882 for (i = 0; i < adev->num_ip_blocks; i++) {
2883 if (!adev->ip_blocks[i].status.hw)
2885 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2886 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2887 /* XXX handle errors */
2889 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2890 adev->ip_blocks[i].version->funcs->name, r);
2892 adev->ip_blocks[i].status.hw = false;
2898 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2902 for (i = 0; i < adev->num_ip_blocks; i++) {
2903 if (!adev->ip_blocks[i].version->funcs->early_fini)
2906 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2908 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2909 adev->ip_blocks[i].version->funcs->name, r);
2913 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2914 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2916 amdgpu_amdkfd_suspend(adev, false);
2918 /* Workaround for ASICs that need to disable the SMC first */
2919 amdgpu_device_smu_fini_early(adev);
2921 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2922 if (!adev->ip_blocks[i].status.hw)
2925 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2926 /* XXX handle errors */
2928 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2929 adev->ip_blocks[i].version->funcs->name, r);
2932 adev->ip_blocks[i].status.hw = false;
2935 if (amdgpu_sriov_vf(adev)) {
2936 if (amdgpu_virt_release_full_gpu(adev, false))
2937 DRM_ERROR("failed to release exclusive mode on fini\n");
2944 * amdgpu_device_ip_fini - run fini for hardware IPs
2946 * @adev: amdgpu_device pointer
2948 * Main teardown pass for hardware IPs. The list of all the hardware
2949 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2950 * are run. hw_fini tears down the hardware associated with each IP
2951 * and sw_fini tears down any software state associated with each IP.
2952 * Returns 0 on success, negative error code on failure.
2954 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2958 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2959 amdgpu_virt_release_ras_err_handler_data(adev);
2961 if (adev->gmc.xgmi.num_physical_nodes > 1)
2962 amdgpu_xgmi_remove_device(adev);
2964 amdgpu_amdkfd_device_fini_sw(adev);
2966 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2967 if (!adev->ip_blocks[i].status.sw)
2970 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2971 amdgpu_ucode_free_bo(adev);
2972 amdgpu_free_static_csa(&adev->virt.csa_obj);
2973 amdgpu_device_wb_fini(adev);
2974 amdgpu_device_mem_scratch_fini(adev);
2975 amdgpu_ib_pool_fini(adev);
2978 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2979 /* XXX handle errors */
2981 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2982 adev->ip_blocks[i].version->funcs->name, r);
2984 adev->ip_blocks[i].status.sw = false;
2985 adev->ip_blocks[i].status.valid = false;
2988 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2989 if (!adev->ip_blocks[i].status.late_initialized)
2991 if (adev->ip_blocks[i].version->funcs->late_fini)
2992 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2993 adev->ip_blocks[i].status.late_initialized = false;
2996 amdgpu_ras_fini(adev);
3002 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
3004 * @work: work_struct.
3006 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
3008 struct amdgpu_device *adev =
3009 container_of(work, struct amdgpu_device, delayed_init_work.work);
3012 r = amdgpu_ib_ring_tests(adev);
3014 DRM_ERROR("ib ring test failed (%d).\n", r);
3017 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
3019 struct amdgpu_device *adev =
3020 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
3022 WARN_ON_ONCE(adev->gfx.gfx_off_state);
3023 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
3025 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
3026 adev->gfx.gfx_off_state = true;
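/*
 * Editorial sketch: gfx_off_req_count acts as a disallow counter, which is
 * why this delayed work can assert both conditions above -- it is only
 * queued once the count has returned to zero. A typical caller pairing
 * (amdgpu_gfx_off_ctrl() is the existing helper; the pattern is a sketch):
 *
 *	amdgpu_gfx_off_ctrl(adev, false);	// req_count++, GFXOFF disallowed
 *	... access GFX registers safely ...
 *	amdgpu_gfx_off_ctrl(adev, true);	// req_count--, 0 queues this work
 */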
3030 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3032 * @adev: amdgpu_device pointer
3034 * Main suspend function for hardware IPs. The list of all the hardware
3035 * IPs that make up the asic is walked, clockgating is disabled and the
3036 * suspend callbacks are run. suspend puts the hardware and software state
3037 * in each IP into a state suitable for suspend.
3038 * Returns 0 on success, negative error code on failure.
3040 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
3044 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3045 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3048 * Per the PMFW team's suggestion, the driver needs to handle gfxoff
3049 * and df cstate feature disablement for gpu reset (e.g. Mode1Reset)
3050 * scenarios. Add the missing df cstate disablement here.
3052 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
3053 dev_warn(adev->dev, "Failed to disallow df cstate");
3055 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3056 if (!adev->ip_blocks[i].status.valid)
3059 /* displays are handled separately */
3060 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
3063 /* XXX handle errors */
3064 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3067 DRM_ERROR("suspend of IP block <%s> failed %d\n",
3068 adev->ip_blocks[i].version->funcs->name, r);
3072 adev->ip_blocks[i].status.hw = false;
3079 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3081 * @adev: amdgpu_device pointer
3083 * Main suspend function for hardware IPs. The list of all the hardware
3084 * IPs that make up the asic is walked, clockgating is disabled and the
3085 * suspend callbacks are run. suspend puts the hardware and software state
3086 * in each IP into a state suitable for suspend.
3087 * Returns 0 on success, negative error code on failure.
3089 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3094 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3096 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3097 if (!adev->ip_blocks[i].status.valid)
3099 /* displays are handled in phase1 */
3100 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3102 /* PSP lost connection when err_event_athub occurs */
3103 if (amdgpu_ras_intr_triggered() &&
3104 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3105 adev->ip_blocks[i].status.hw = false;
3109 /* skip unnecessary suspend if we have not initialized them yet */
3110 if (adev->gmc.xgmi.pending_reset &&
3111 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3112 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3113 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3114 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3115 adev->ip_blocks[i].status.hw = false;
3119 /* skip suspend of gfx/mes and psp for S0ix;
3120 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3121 * like at runtime. PSP is also part of the always-on hardware,
3122 * so no need to suspend it.
3124 if (adev->in_s0ix &&
3125 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3126 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3127 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3130 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3131 if (adev->in_s0ix &&
3132 (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3133 IP_VERSION(5, 0, 0)) &&
3134 (adev->ip_blocks[i].version->type ==
3135 AMD_IP_BLOCK_TYPE_SDMA))
3138 /* swPSP provides the IMU and RLC FW binaries to TOS during cold boot.
3139 * These live in the TMR and are expected to be reused by PSP-TOS to reload
3140 * from that location; RLC autoload also gets loaded from there based on the
3141 * PMFW -> PSP message during the re-init sequence.
3142 * Therefore, psp suspend & resume should be skipped to avoid destroying
3143 * the TMR and reloading the FWs again for IMU-enabled APU ASICs.
3145 if (amdgpu_in_reset(adev) &&
3146 (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3147 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3150 /* XXX handle errors */
3151 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3154 DRM_ERROR("suspend of IP block <%s> failed %d\n",
3155 adev->ip_blocks[i].version->funcs->name, r);
3157 adev->ip_blocks[i].status.hw = false;
3158 /* handle putting the SMC in the appropriate state */
3159 if (!amdgpu_sriov_vf(adev)) {
3160 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3161 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3163 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3164 adev->mp1_state, r);
3175 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3177 * @adev: amdgpu_device pointer
3179 * Main suspend function for hardware IPs. The list of all the hardware
3180 * IPs that make up the asic is walked, clockgating is disabled and the
3181 * suspend callbacks are run. suspend puts the hardware and software state
3182 * in each IP into a state suitable for suspend.
3183 * Returns 0 on success, negative error code on failure.
3185 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3189 if (amdgpu_sriov_vf(adev)) {
3190 amdgpu_virt_fini_data_exchange(adev);
3191 amdgpu_virt_request_full_gpu(adev, false);
3194 r = amdgpu_device_ip_suspend_phase1(adev);
3197 r = amdgpu_device_ip_suspend_phase2(adev);
3199 if (amdgpu_sriov_vf(adev))
3200 amdgpu_virt_release_full_gpu(adev, false);
3205 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3209 static enum amd_ip_block_type ip_order[] = {
3210 AMD_IP_BLOCK_TYPE_COMMON,
3211 AMD_IP_BLOCK_TYPE_GMC,
3212 AMD_IP_BLOCK_TYPE_PSP,
3213 AMD_IP_BLOCK_TYPE_IH,
3216 for (i = 0; i < adev->num_ip_blocks; i++) {
3218 struct amdgpu_ip_block *block;
3220 block = &adev->ip_blocks[i];
3221 block->status.hw = false;
3223 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3225 if (block->version->type != ip_order[j] ||
3226 !block->status.valid)
3229 r = block->version->funcs->hw_init(adev);
3230 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3233 block->status.hw = true;
3240 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3244 static enum amd_ip_block_type ip_order[] = {
3245 AMD_IP_BLOCK_TYPE_SMC,
3246 AMD_IP_BLOCK_TYPE_DCE,
3247 AMD_IP_BLOCK_TYPE_GFX,
3248 AMD_IP_BLOCK_TYPE_SDMA,
3249 AMD_IP_BLOCK_TYPE_MES,
3250 AMD_IP_BLOCK_TYPE_UVD,
3251 AMD_IP_BLOCK_TYPE_VCE,
3252 AMD_IP_BLOCK_TYPE_VCN,
3253 AMD_IP_BLOCK_TYPE_JPEG
3256 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3258 struct amdgpu_ip_block *block;
3260 for (j = 0; j < adev->num_ip_blocks; j++) {
3261 block = &adev->ip_blocks[j];
3263 if (block->version->type != ip_order[i] ||
3264 !block->status.valid ||
3268 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3269 r = block->version->funcs->resume(adev);
3271 r = block->version->funcs->hw_init(adev);
3273 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3276 block->status.hw = true;
3284 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3286 * @adev: amdgpu_device pointer
3288 * First resume function for hardware IPs. The list of all the hardware
3289 * IPs that make up the asic is walked and the resume callbacks are run for
3290 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3291 * after a suspend and updates the software state as necessary. This
3292 * function is also used for restoring the GPU after a GPU reset.
3293 * Returns 0 on success, negative error code on failure.
3295 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3299 for (i = 0; i < adev->num_ip_blocks; i++) {
3300 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3302 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3303 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3304 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3305 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3307 r = adev->ip_blocks[i].version->funcs->resume(adev);
3309 DRM_ERROR("resume of IP block <%s> failed %d\n",
3310 adev->ip_blocks[i].version->funcs->name, r);
3313 adev->ip_blocks[i].status.hw = true;
3321 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3323 * @adev: amdgpu_device pointer
3325 * Second resume function for hardware IPs. The list of all the hardware
3326 * IPs that make up the asic is walked and the resume callbacks are run for
3327 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
3328 * functional state after a suspend and updates the software state as
3329 * necessary. This function is also used for restoring the GPU after a GPU reset.
3331 * Returns 0 on success, negative error code on failure.
3333 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3337 for (i = 0; i < adev->num_ip_blocks; i++) {
3338 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3340 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3341 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3342 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3343 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3345 r = adev->ip_blocks[i].version->funcs->resume(adev);
3347 DRM_ERROR("resume of IP block <%s> failed %d\n",
3348 adev->ip_blocks[i].version->funcs->name, r);
3351 adev->ip_blocks[i].status.hw = true;
3358 * amdgpu_device_ip_resume - run resume for hardware IPs
3360 * @adev: amdgpu_device pointer
3362 * Main resume function for hardware IPs. The hardware IPs
3363 * are split into two resume functions because they are
3364 * also used in recovering from a GPU reset and some additional
3365 * steps need to be taken between them. In this case (S3/S4) they are run sequentially.
3367 * Returns 0 on success, negative error code on failure.
3369 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3373 r = amdgpu_device_ip_resume_phase1(adev);
3377 r = amdgpu_device_fw_loading(adev);
3381 r = amdgpu_device_ip_resume_phase2(adev);
3387 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3389 * @adev: amdgpu_device pointer
3391 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3393 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3395 if (amdgpu_sriov_vf(adev)) {
3396 if (adev->is_atom_fw) {
3397 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3398 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3400 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3401 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3404 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3405 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3410 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3412 * @asic_type: AMD asic type
3414 * Check if there is DC (the new modesetting infrastructure) support for an asic.
3415 * Returns true if DC is supported, false if not.
3417 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3419 switch (asic_type) {
3420 #ifdef CONFIG_DRM_AMDGPU_SI
3424 /* chips with no display hardware */
3426 #if defined(CONFIG_DRM_AMD_DC)
3432 * We have systems in the wild with these ASICs that require
3433 * LVDS and VGA support which is not supported with DC.
3435 * Fall back to the non-DC driver here by default so as not to
3436 * cause regressions.
3438 #if defined(CONFIG_DRM_AMD_DC_SI)
3439 return amdgpu_dc > 0;
3448 * We have systems in the wild with these ASICs that require
3449 * VGA support which is not supported with DC.
3451 * Fall back to the non-DC driver here by default so as not to
3452 * cause regressions.
3454 return amdgpu_dc > 0;
3456 return amdgpu_dc != 0;
3460 DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
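/*
 * Editorial note: the checks above implement the usual tri-state module
 * parameter convention; a hypothetical example:
 *
 *	amdgpu.dc=1	// force DC on (the "amdgpu_dc > 0" fallback ASICs)
 *	amdgpu.dc=0	// force the legacy (non-DC) display path
 *	amdgpu.dc=-1	// default: ASIC-dependent auto selection
 */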
3467 * amdgpu_device_has_dc_support - check if dc is supported
3469 * @adev: amdgpu_device pointer
3471 * Returns true for supported, false for not supported
3473 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3475 if (adev->enable_virtual_display ||
3476 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3479 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3482 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3484 struct amdgpu_device *adev =
3485 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3486 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3488 /* It's a bug to not have a hive within this function */
3493 * Use task barrier to synchronize all xgmi reset works across the
3494 * hive. task_barrier_enter and task_barrier_exit will block
3495 * until all the threads running the xgmi reset works reach
3496 * those points. task_barrier_full will do both blocks.
3498 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3500 task_barrier_enter(&hive->tb);
3501 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3503 if (adev->asic_reset_res)
3506 task_barrier_exit(&hive->tb);
3507 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3509 if (adev->asic_reset_res)
3512 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3513 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3514 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3517 task_barrier_full(&hive->tb);
3518 adev->asic_reset_res = amdgpu_asic_reset(adev);
3522 if (adev->asic_reset_res)
3523 DRM_WARN("ASIC reset failed with error %d for drm dev %s",
3524 adev->asic_reset_res, adev_to_drm(adev)->unique);
3525 amdgpu_put_xgmi_hive(hive);
3528 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3530 char *input = amdgpu_lockup_timeout;
3531 char *timeout_setting = NULL;
3537 * By default the timeout for non-compute jobs is 10000 ms
3538 * and 60000 ms for compute jobs.
3539 * In SR-IOV or passthrough mode, the timeout for compute
3540 * jobs is 60000 ms by default.
3542 adev->gfx_timeout = msecs_to_jiffies(10000);
3543 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3544 if (amdgpu_sriov_vf(adev))
3545 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3546 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3548 adev->compute_timeout = msecs_to_jiffies(60000);
3550 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3551 while ((timeout_setting = strsep(&input, ",")) &&
3552 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3553 ret = kstrtol(timeout_setting, 0, &timeout);
3560 } else if (timeout < 0) {
3561 timeout = MAX_SCHEDULE_TIMEOUT;
3562 dev_warn(adev->dev, "lockup timeout disabled");
3563 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3565 timeout = msecs_to_jiffies(timeout);
3570 adev->gfx_timeout = timeout;
3573 adev->compute_timeout = timeout;
3576 adev->sdma_timeout = timeout;
3579 adev->video_timeout = timeout;
3586 * There is only one value specified and
3587 * it should apply to all non-compute jobs.
3590 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3591 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3592 adev->compute_timeout = adev->gfx_timeout;
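/*
 * Editorial example of the parameter format consumed by the strsep() loop
 * above (values in ms, consumed in the order gfx, compute, sdma, video):
 *
 *	amdgpu.lockup_timeout=10000,60000,10000,10000
 *
 * A single value applies to all non-compute queues, 0 keeps the default,
 * and a negative value disables the timeout (MAX_SCHEDULE_TIMEOUT).
 */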
3600 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3602 * @adev: amdgpu_device pointer
3604 * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode.
3606 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3608 struct iommu_domain *domain;
3610 domain = iommu_get_domain_for_dev(adev->dev);
3611 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3612 adev->ram_is_direct_mapped = true;
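/*
 * Editorial note: an identity-mapped (passthrough) domain means device DMA
 * addresses equal physical addresses, so system RAM is directly reachable
 * by the GPU. Booting with iommu=pt, or with the IOMMU disabled entirely,
 * is the typical way to land in the branch above.
 */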
3615 static const struct attribute *amdgpu_dev_attributes[] = {
3616 &dev_attr_pcie_replay_count.attr,
3620 static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3622 if (amdgpu_mcbp == 1)
3623 adev->gfx.mcbp = true;
3624 else if (amdgpu_mcbp == 0)
3625 adev->gfx.mcbp = false;
3626 else if ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 0, 0)) &&
3627 (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0)) &&
3628 adev->gfx.num_gfx_rings)
3629 adev->gfx.mcbp = true;
3631 if (amdgpu_sriov_vf(adev))
3632 adev->gfx.mcbp = true;
3635 DRM_INFO("MCBP is enabled\n");
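/*
 * Editorial note: amdgpu.mcbp follows the same tri-state convention as the
 * logic above; a hypothetical example:
 *
 *	amdgpu.mcbp=1	// force mid-command-buffer preemption on
 *	amdgpu.mcbp=0	// force it off
 *	amdgpu.mcbp=-1	// auto: GC 9.x with gfx rings, and always under SR-IOV
 */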
3639 * amdgpu_device_init - initialize the driver
3641 * @adev: amdgpu_device pointer
3642 * @flags: driver flags
3644 * Initializes the driver info and hw (all asics).
3645 * Returns 0 for success or an error on failure.
3646 * Called at driver startup.
3648 int amdgpu_device_init(struct amdgpu_device *adev,
3651 struct drm_device *ddev = adev_to_drm(adev);
3652 struct pci_dev *pdev = adev->pdev;
3658 adev->shutdown = false;
3659 adev->flags = flags;
3661 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3662 adev->asic_type = amdgpu_force_asic_type;
3664 adev->asic_type = flags & AMD_ASIC_MASK;
3666 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3667 if (amdgpu_emu_mode == 1)
3668 adev->usec_timeout *= 10;
3669 adev->gmc.gart_size = 512 * 1024 * 1024;
3670 adev->accel_working = false;
3671 adev->num_rings = 0;
3672 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3673 adev->mman.buffer_funcs = NULL;
3674 adev->mman.buffer_funcs_ring = NULL;
3675 adev->vm_manager.vm_pte_funcs = NULL;
3676 adev->vm_manager.vm_pte_num_scheds = 0;
3677 adev->gmc.gmc_funcs = NULL;
3678 adev->harvest_ip_mask = 0x0;
3679 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3680 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3682 adev->smc_rreg = &amdgpu_invalid_rreg;
3683 adev->smc_wreg = &amdgpu_invalid_wreg;
3684 adev->pcie_rreg = &amdgpu_invalid_rreg;
3685 adev->pcie_wreg = &amdgpu_invalid_wreg;
3686 adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
3687 adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
3688 adev->pciep_rreg = &amdgpu_invalid_rreg;
3689 adev->pciep_wreg = &amdgpu_invalid_wreg;
3690 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3691 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3692 adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
3693 adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
3694 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3695 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3696 adev->didt_rreg = &amdgpu_invalid_rreg;
3697 adev->didt_wreg = &amdgpu_invalid_wreg;
3698 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3699 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3700 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3701 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3703 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3704 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3705 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3707 /* mutex initialization is all done here so we
3708 * can call these functions without locking issues
3710 mutex_init(&adev->firmware.mutex);
3711 mutex_init(&adev->pm.mutex);
3712 mutex_init(&adev->gfx.gpu_clock_mutex);
3713 mutex_init(&adev->srbm_mutex);
3714 mutex_init(&adev->gfx.pipe_reserve_mutex);
3715 mutex_init(&adev->gfx.gfx_off_mutex);
3716 mutex_init(&adev->gfx.partition_mutex);
3717 mutex_init(&adev->grbm_idx_mutex);
3718 mutex_init(&adev->mn_lock);
3719 mutex_init(&adev->virt.vf_errors.lock);
3720 hash_init(adev->mn_hash);
3721 mutex_init(&adev->psp.mutex);
3722 mutex_init(&adev->notifier_lock);
3723 mutex_init(&adev->pm.stable_pstate_ctx_lock);
3724 mutex_init(&adev->benchmark_mutex);
3726 amdgpu_device_init_apu_flags(adev);
3728 r = amdgpu_device_check_arguments(adev);
3732 spin_lock_init(&adev->mmio_idx_lock);
3733 spin_lock_init(&adev->smc_idx_lock);
3734 spin_lock_init(&adev->pcie_idx_lock);
3735 spin_lock_init(&adev->uvd_ctx_idx_lock);
3736 spin_lock_init(&adev->didt_idx_lock);
3737 spin_lock_init(&adev->gc_cac_idx_lock);
3738 spin_lock_init(&adev->se_cac_idx_lock);
3739 spin_lock_init(&adev->audio_endpt_idx_lock);
3740 spin_lock_init(&adev->mm_stats.lock);
3742 INIT_LIST_HEAD(&adev->shadow_list);
3743 mutex_init(&adev->shadow_list_lock);
3745 INIT_LIST_HEAD(&adev->reset_list);
3747 INIT_LIST_HEAD(&adev->ras_list);
3749 INIT_LIST_HEAD(&adev->pm.od_kobj_list);
3751 INIT_DELAYED_WORK(&adev->delayed_init_work,
3752 amdgpu_device_delayed_init_work_handler);
3753 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3754 amdgpu_device_delay_enable_gfx_off);
3756 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3758 adev->gfx.gfx_off_req_count = 1;
3759 adev->gfx.gfx_off_residency = 0;
3760 adev->gfx.gfx_off_entrycount = 0;
3761 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3763 atomic_set(&adev->throttling_logging_enabled, 1);
3765 * If throttling continues, logging will be performed every minute
3766 * to avoid log flooding. "-1" is subtracted since the thermal
3767 * throttling interrupt comes every second. Thus, the total logging
3768 * interval is 59 seconds (ratelimited printk interval) + 1 second (waiting
3769 * for the throttling interrupt) = 60 seconds.
3771 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3772 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3774 /* Registers mapping */
3775 /* TODO: block userspace mapping of io register */
3776 if (adev->asic_type >= CHIP_BONAIRE) {
3777 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3778 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3780 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3781 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3784 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3785 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3787 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3791 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3792 DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
3795 * The reset domain needs to be present early, before the XGMI hive is
3796 * discovered (if any) and initialized, so that the reset semaphore and
3797 * in_gpu_reset flag can be used early during init and before calling RREG32.
3799 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3800 if (!adev->reset_domain)
3803 /* detect hw virtualization here */
3804 amdgpu_detect_virtualization(adev);
3806 amdgpu_device_get_pcie_info(adev);
3808 r = amdgpu_device_get_job_timeout_settings(adev);
3810 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3814 /* early init functions */
3815 r = amdgpu_device_ip_early_init(adev);
3819 amdgpu_device_set_mcbp(adev);
3821 /* Get rid of things like offb */
3822 r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
3826 /* Enable TMZ based on IP_VERSION */
3827 amdgpu_gmc_tmz_set(adev);
3829 amdgpu_gmc_noretry_set(adev);
3830 /* Need to get xgmi info early to decide the reset behavior */
3831 if (adev->gmc.xgmi.supported) {
3832 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3837 /* enable PCIE atomic ops */
3838 if (amdgpu_sriov_vf(adev)) {
3839 if (adev->virt.fw_reserve.p_pf2vf)
3840 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3841 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3842 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3843 /* APUs with gfx9 onwards don't rely on PCIe atomics; the
3844 * internal path natively supports atomics, so set have_atomics_support to true.
3846 } else if ((adev->flags & AMD_IS_APU) &&
3847 (amdgpu_ip_version(adev, GC_HWIP, 0) >
3848 IP_VERSION(9, 0, 0))) {
3849 adev->have_atomics_support = true;
3851 adev->have_atomics_support =
3852 !pci_enable_atomic_ops_to_root(adev->pdev,
3853 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3854 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3857 if (!adev->have_atomics_support)
3858 dev_info(adev->dev, "PCIE atomic ops are not supported\n");
3860 /* doorbell bar mapping and doorbell index init */
3861 amdgpu_doorbell_init(adev);
3863 if (amdgpu_emu_mode == 1) {
3864 /* post the asic on emulation mode */
3865 emu_soc_asic_init(adev);
3866 goto fence_driver_init;
3869 amdgpu_reset_init(adev);
3871 /* detect if we have an SR-IOV vbios */
3873 amdgpu_device_detect_sriov_bios(adev);
3875 /* check if we need to reset the asic
3876 * E.g., driver was not cleanly unloaded previously, etc.
3878 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3879 if (adev->gmc.xgmi.num_physical_nodes) {
3880 dev_info(adev->dev, "Pending hive reset.\n");
3881 adev->gmc.xgmi.pending_reset = true;
3882 /* Only need to init the necessary blocks for SMU to handle the reset */
3883 for (i = 0; i < adev->num_ip_blocks; i++) {
3884 if (!adev->ip_blocks[i].status.valid)
3886 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3887 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3888 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3889 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3890 DRM_DEBUG("IP %s disabled for hw_init.\n",
3891 adev->ip_blocks[i].version->funcs->name);
3892 adev->ip_blocks[i].status.hw = true;
3896 tmp = amdgpu_reset_method;
3897 /* It should do a default reset when loading or reloading the driver,
3898 * regardless of the module parameter reset_method.
3900 amdgpu_reset_method = AMD_RESET_METHOD_NONE;
3901 r = amdgpu_asic_reset(adev);
3902 amdgpu_reset_method = tmp;
3904 dev_err(adev->dev, "asic reset on init failed\n");
3910 /* Post card if necessary */
3911 if (amdgpu_device_need_post(adev)) {
3913 dev_err(adev->dev, "no vBIOS found\n");
3917 DRM_INFO("GPU posting now...\n");
3918 r = amdgpu_device_asic_init(adev);
3920 dev_err(adev->dev, "gpu post error!\n");
3926 if (adev->is_atom_fw) {
3927 /* Initialize clocks */
3928 r = amdgpu_atomfirmware_get_clock_info(adev);
3930 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3931 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3935 /* Initialize clocks */
3936 r = amdgpu_atombios_get_clock_info(adev);
3938 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3939 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3942 /* init i2c buses */
3943 if (!amdgpu_device_has_dc_support(adev))
3944 amdgpu_atombios_i2c_init(adev);
3950 r = amdgpu_fence_driver_sw_init(adev);
3952 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3953 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3957 /* init the mode config */
3958 drm_mode_config_init(adev_to_drm(adev));
3960 r = amdgpu_device_ip_init(adev);
3962 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3963 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3964 goto release_ras_con;
3967 amdgpu_fence_driver_hw_init(adev);
3970 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3971 adev->gfx.config.max_shader_engines,
3972 adev->gfx.config.max_sh_per_se,
3973 adev->gfx.config.max_cu_per_sh,
3974 adev->gfx.cu_info.number);
3976 adev->accel_working = true;
3978 amdgpu_vm_check_compute_bug(adev);
3980 /* Initialize the buffer migration limit. */
3981 if (amdgpu_moverate >= 0)
3982 max_MBps = amdgpu_moverate;
3984 max_MBps = 8; /* Allow 8 MB/s. */
3985 /* Get a log2 for easy divisions. */
3986 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
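/*
 * Editorial worked example: with the default max_MBps = 8,
 * log2_max_MBps = ilog2(8) = 3, so later accounting can replace a divide
 * by the rate with a shift (ilog2 floors non-power-of-two rates):
 *
 *	mbytes_moved = bytes_moved >> 20;
 *	seconds_of_budget = mbytes_moved >> adev->mm_stats.log2_max_MBps;
 */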
3989 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3990 * Otherwise the mgpu fan boost feature will be skipped because the
3991 * gpu instance count would be too low.
3993 amdgpu_register_gpu_instance(adev);
3995 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3996 * explicit gating rather than handling it automatically.
3998 if (!adev->gmc.xgmi.pending_reset) {
3999 r = amdgpu_device_ip_late_init(adev);
4001 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4002 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4003 goto release_ras_con;
4006 amdgpu_ras_resume(adev);
4007 queue_delayed_work(system_wq, &adev->delayed_init_work,
4008 msecs_to_jiffies(AMDGPU_RESUME_MS));
4011 if (amdgpu_sriov_vf(adev)) {
4012 amdgpu_virt_release_full_gpu(adev, true);
4013 flush_delayed_work(&adev->delayed_init_work);
4017 * Place the sysfs registration after `late_init`, as some of the
4018 * operations performed in `late_init` might affect the creation
4019 * of the sysfs interfaces.
4021 r = amdgpu_atombios_sysfs_init(adev);
4023 drm_err(&adev->ddev,
4024 "registering atombios sysfs failed (%d).\n", r);
4026 r = amdgpu_pm_sysfs_init(adev);
4028 DRM_ERROR("registering pm sysfs failed (%d).\n", r);
4030 r = amdgpu_ucode_sysfs_init(adev);
4032 adev->ucode_sysfs_en = false;
4033 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
4035 adev->ucode_sysfs_en = true;
4037 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
4039 dev_err(adev->dev, "Could not create amdgpu device attr\n");
4041 amdgpu_fru_sysfs_init(adev);
4043 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4044 r = amdgpu_pmu_init(adev);
4046 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
4048 /* Keep the stored pci confspace at hand for restore on a sudden PCI error */
4049 if (amdgpu_device_cache_pci_state(adev->pdev))
4050 pci_restore_state(pdev);
4052 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
4053 /* this will fail for cards that aren't VGA class devices; just ignore it */
4056 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4057 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4059 px = amdgpu_device_supports_px(ddev);
4061 if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
4062 apple_gmux_detect(NULL, NULL)))
4063 vga_switcheroo_register_client(adev->pdev,
4064 &amdgpu_switcheroo_ops, px);
4067 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4069 if (adev->gmc.xgmi.pending_reset)
4070 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
4071 msecs_to_jiffies(AMDGPU_RESUME_MS));
4073 amdgpu_device_check_iommu_direct_map(adev);
4078 if (amdgpu_sriov_vf(adev))
4079 amdgpu_virt_release_full_gpu(adev, true);
4081 /* failed in exclusive mode due to timeout */
4082 if (amdgpu_sriov_vf(adev) &&
4083 !amdgpu_sriov_runtime(adev) &&
4084 amdgpu_virt_mmio_blocked(adev) &&
4085 !amdgpu_virt_wait_reset(adev)) {
4086 dev_err(adev->dev, "VF exclusive mode timeout\n");
4087 /* Don't send request since VF is inactive. */
4088 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4089 adev->virt.ops = NULL;
4092 amdgpu_release_ras_context(adev);
4095 amdgpu_vf_error_trans_all(adev);
4100 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4103 /* Clear all CPU mappings pointing to this device */
4104 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4106 /* Unmap all mapped bars - Doorbell, registers and VRAM */
4107 amdgpu_doorbell_fini(adev);
4109 iounmap(adev->rmmio);
4111 if (adev->mman.aper_base_kaddr)
4112 iounmap(adev->mman.aper_base_kaddr);
4113 adev->mman.aper_base_kaddr = NULL;
4115 /* Memory manager related */
4116 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4117 arch_phys_wc_del(adev->gmc.vram_mtrr);
4118 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4123 * amdgpu_device_fini_hw - tear down the driver
4125 * @adev: amdgpu_device pointer
4127 * Tear down the driver info (all asics).
4128 * Called at driver shutdown.
4130 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4132 dev_info(adev->dev, "amdgpu: finishing device.\n");
4133 flush_delayed_work(&adev->delayed_init_work);
4134 adev->shutdown = true;
4136 /* make sure the IB tests have finished before entering exclusive mode
4137 * to avoid preemption during the IB tests
4139 if (amdgpu_sriov_vf(adev)) {
4140 amdgpu_virt_request_full_gpu(adev, false);
4141 amdgpu_virt_fini_data_exchange(adev);
4144 /* disable all interrupts */
4145 amdgpu_irq_disable_all(adev);
4146 if (adev->mode_info.mode_config_initialized) {
4147 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4148 drm_helper_force_disable_all(adev_to_drm(adev));
4150 drm_atomic_helper_shutdown(adev_to_drm(adev));
4152 amdgpu_fence_driver_hw_fini(adev);
4154 if (adev->mman.initialized)
4155 drain_workqueue(adev->mman.bdev.wq);
4157 if (adev->pm.sysfs_initialized)
4158 amdgpu_pm_sysfs_fini(adev);
4159 if (adev->ucode_sysfs_en)
4160 amdgpu_ucode_sysfs_fini(adev);
4161 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4162 amdgpu_fru_sysfs_fini(adev);
4164 /* RAS features must be disabled before hw fini */
4165 amdgpu_ras_pre_fini(adev);
4167 amdgpu_device_ip_fini_early(adev);
4169 amdgpu_irq_fini_hw(adev);
4171 if (adev->mman.initialized)
4172 ttm_device_clear_dma_mappings(&adev->mman.bdev);
4174 amdgpu_gart_dummy_page_fini(adev);
4176 if (drm_dev_is_unplugged(adev_to_drm(adev)))
4177 amdgpu_device_unmap_mmio(adev);
4181 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4186 amdgpu_fence_driver_sw_fini(adev);
4187 amdgpu_device_ip_fini(adev);
4188 amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4189 adev->accel_working = false;
4190 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4192 amdgpu_reset_fini(adev);
4194 /* free i2c buses */
4195 if (!amdgpu_device_has_dc_support(adev))
4196 amdgpu_i2c_fini(adev);
4198 if (amdgpu_emu_mode != 1)
4199 amdgpu_atombios_fini(adev);
4204 px = amdgpu_device_supports_px(adev_to_drm(adev));
4206 if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
4207 apple_gmux_detect(NULL, NULL)))
4208 vga_switcheroo_unregister_client(adev->pdev);
4211 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4213 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4214 vga_client_unregister(adev->pdev);
4216 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4218 iounmap(adev->rmmio);
4220 amdgpu_doorbell_fini(adev);
4224 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4225 amdgpu_pmu_fini(adev);
4226 if (adev->mman.discovery_bin)
4227 amdgpu_discovery_fini(adev);
4229 amdgpu_reset_put_reset_domain(adev->reset_domain);
4230 adev->reset_domain = NULL;
4232 kfree(adev->pci_state);
4237 * amdgpu_device_evict_resources - evict device resources
4238 * @adev: amdgpu device object
4240 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4241 * of the vram memory type. Mainly used for evicting device resources at suspend time.
4245 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4249 /* No need to evict vram on APUs for suspend to ram or s2idle */
4250 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4253 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4255 DRM_WARN("evicting device resources failed\n");
4263 * amdgpu_device_suspend - initiate device suspend
4265 * @dev: drm dev pointer
4266 * @fbcon: notify the fbdev of suspend
4268 * Puts the hw in the suspend state (all asics).
4269 * Returns 0 for success or an error on failure.
4270 * Called at driver suspend.
4272 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4274 struct amdgpu_device *adev = drm_to_adev(dev);
4277 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4280 adev->in_suspend = true;
4282 /* Evict the majority of BOs before grabbing the full access */
4283 r = amdgpu_device_evict_resources(adev);
4287 if (amdgpu_sriov_vf(adev)) {
4288 amdgpu_virt_fini_data_exchange(adev);
4289 r = amdgpu_virt_request_full_gpu(adev, false);
4294 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4295 DRM_WARN("smart shift update failed\n");
4298 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4300 cancel_delayed_work_sync(&adev->delayed_init_work);
4301 flush_delayed_work(&adev->gfx.gfx_off_delay_work);
4303 amdgpu_ras_suspend(adev);
4305 amdgpu_device_ip_suspend_phase1(adev);
4308 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4310 r = amdgpu_device_evict_resources(adev);
4314 amdgpu_fence_driver_hw_fini(adev);
4316 amdgpu_device_ip_suspend_phase2(adev);
4318 if (amdgpu_sriov_vf(adev))
4319 amdgpu_virt_release_full_gpu(adev, false);
4325 * amdgpu_device_resume - initiate device resume
4327 * @dev: drm dev pointer
4328 * @fbcon: notify the fbdev of resume
4330 * Bring the hw back to operating state (all asics).
4331 * Returns 0 for success or an error on failure.
4332 * Called at driver resume.
4334 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4336 struct amdgpu_device *adev = drm_to_adev(dev);
4339 if (amdgpu_sriov_vf(adev)) {
4340 r = amdgpu_virt_request_full_gpu(adev, true);
4345 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4349 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4352 if (amdgpu_device_need_post(adev)) {
4353 r = amdgpu_device_asic_init(adev);
4355 dev_err(adev->dev, "amdgpu asic init failed\n");
4358 r = amdgpu_device_ip_resume(adev);
4361 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4364 amdgpu_fence_driver_hw_init(adev);
4366 r = amdgpu_device_ip_late_init(adev);
4370 queue_delayed_work(system_wq, &adev->delayed_init_work,
4371 msecs_to_jiffies(AMDGPU_RESUME_MS));
4373 if (!adev->in_s0ix) {
4374 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4380 if (amdgpu_sriov_vf(adev)) {
4381 amdgpu_virt_init_data_exchange(adev);
4382 amdgpu_virt_release_full_gpu(adev, true);
4388 /* Make sure IB tests flushed */
4389 flush_delayed_work(&adev->delayed_init_work);
4392 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4394 amdgpu_ras_resume(adev);
4396 if (adev->mode_info.num_crtc) {
4398 * Most of the connector probing functions try to acquire runtime pm
4399 * refs to ensure that the GPU is powered on when connector polling is
4400 * performed. Since we're calling this from a runtime PM callback,
4401 * trying to acquire rpm refs will cause us to deadlock.
4403 * Since we're guaranteed to be holding the rpm lock, it's safe to
4404 * temporarily disable the rpm helpers so this doesn't deadlock us.
4407 dev->dev->power.disable_depth++;
4409 if (!adev->dc_enabled)
4410 drm_helper_hpd_irq_event(dev);
4412 drm_kms_helper_hotplug_event(dev);
4414 dev->dev->power.disable_depth--;
4417 adev->in_suspend = false;
4419 if (adev->enable_mes)
4420 amdgpu_mes_self_test(adev);
4422 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4423 DRM_WARN("smart shift update failed\n");
4429 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4431 * @adev: amdgpu_device pointer
4433 * The list of all the hardware IPs that make up the asic is walked and
4434 * the check_soft_reset callbacks are run. check_soft_reset determines
4435 * if the asic is still hung or not.
4436 * Returns true if any of the IPs are still in a hung state, false if not.
4438 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4441 bool asic_hang = false;
4443 if (amdgpu_sriov_vf(adev))
4446 if (amdgpu_asic_need_full_reset(adev))
4449 for (i = 0; i < adev->num_ip_blocks; i++) {
4450 if (!adev->ip_blocks[i].status.valid)
4452 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4453 adev->ip_blocks[i].status.hang =
4454 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4455 if (adev->ip_blocks[i].status.hang) {
4456 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4464 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4466 * @adev: amdgpu_device pointer
4468 * The list of all the hardware IPs that make up the asic is walked and the
4469 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4470 * handles any IP specific hardware or software state changes that are
4471 * necessary for a soft reset to succeed.
4472 * Returns 0 on success, negative error code on failure.
4474 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4478 for (i = 0; i < adev->num_ip_blocks; i++) {
4479 if (!adev->ip_blocks[i].status.valid)
4481 if (adev->ip_blocks[i].status.hang &&
4482 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4483 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4493 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4495 * @adev: amdgpu_device pointer
4497 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4498 * reset is necessary to recover.
4499 * Returns true if a full asic reset is required, false if not.
4501 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4505 if (amdgpu_asic_need_full_reset(adev))
4508 for (i = 0; i < adev->num_ip_blocks; i++) {
4509 if (!adev->ip_blocks[i].status.valid)
4511 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4512 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4513 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4514 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4515 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4516 if (adev->ip_blocks[i].status.hang) {
4517 dev_info(adev->dev, "Some block need full reset!\n");
4526 * amdgpu_device_ip_soft_reset - do a soft reset
4528 * @adev: amdgpu_device pointer
4530 * The list of all the hardware IPs that make up the asic is walked and the
4531 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4532 * IP specific hardware or software state changes that are necessary to soft
4533 * reset the IP.
4534 * Returns 0 on success, negative error code on failure.
4536 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4540 for (i = 0; i < adev->num_ip_blocks; i++) {
4541 if (!adev->ip_blocks[i].status.valid)
4543 if (adev->ip_blocks[i].status.hang &&
4544 adev->ip_blocks[i].version->funcs->soft_reset) {
4545 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4555 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4557 * @adev: amdgpu_device pointer
4559 * The list of all the hardware IPs that make up the asic is walked and the
4560 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4561 * handles any IP specific hardware or software state changes that are
4562 * necessary after the IP has been soft reset.
4563 * Returns 0 on success, negative error code on failure.
4565 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4569 for (i = 0; i < adev->num_ip_blocks; i++) {
4570 if (!adev->ip_blocks[i].status.valid)
4572 if (adev->ip_blocks[i].status.hang &&
4573 adev->ip_blocks[i].version->funcs->post_soft_reset)
4574 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
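/*
 * Illustrative sketch: the four helpers above are meant to be chained, as
 * done in amdgpu_device_pre_asic_reset() below, falling back to a full
 * reset when the hang persists:
 *
 *	if (amdgpu_device_ip_check_soft_reset(adev)) {
 *		amdgpu_device_ip_pre_soft_reset(adev);
 *		r = amdgpu_device_ip_soft_reset(adev);
 *		amdgpu_device_ip_post_soft_reset(adev);
 *		if (r || amdgpu_device_ip_check_soft_reset(adev))
 *			need_full_reset = true;
 *	}
 */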
4583 * amdgpu_device_recover_vram - Recover some VRAM contents
4585 * @adev: amdgpu_device pointer
4587 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4588 * restore things like GPUVM page tables after a GPU reset where
4589 * the contents of VRAM might be lost.
4592 * 0 on success, negative error code on failure.
4594 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4596 struct dma_fence *fence = NULL, *next = NULL;
4597 struct amdgpu_bo *shadow;
4598 struct amdgpu_bo_vm *vmbo;
4601 if (amdgpu_sriov_runtime(adev))
4602 tmo = msecs_to_jiffies(8000);
4604 tmo = msecs_to_jiffies(100);
4606 dev_info(adev->dev, "recover vram bo from shadow start\n");
4607 mutex_lock(&adev->shadow_list_lock);
4608 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4609 /* If vm is compute context or adev is APU, shadow will be NULL */
4612 shadow = vmbo->shadow;
4614 /* No need to recover an evicted BO */
4615 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4616 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4617 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4620 r = amdgpu_bo_restore_shadow(shadow, &next);
4625 tmo = dma_fence_wait_timeout(fence, false, tmo);
4626 dma_fence_put(fence);
4631 } else if (tmo < 0) {
4639 mutex_unlock(&adev->shadow_list_lock);
4642 tmo = dma_fence_wait_timeout(fence, false, tmo);
4643 dma_fence_put(fence);
4645 if (r < 0 || tmo <= 0) {
4646 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4650 dev_info(adev->dev, "recover vram bo from shadow done\n");
4656 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4658 * @adev: amdgpu_device pointer
4659 * @from_hypervisor: request from hypervisor
4661 * Do VF FLR and reinitialize the ASIC.
4662 * Returns 0 on success, negative error code on failure.
4664 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4665 bool from_hypervisor)
4668 struct amdgpu_hive_info *hive = NULL;
4669 int retry_limit = 0;
4672 amdgpu_amdkfd_pre_reset(adev);
4674 if (from_hypervisor)
4675 r = amdgpu_virt_request_full_gpu(adev, true);
4677 r = amdgpu_virt_reset_gpu(adev);
4680 amdgpu_irq_gpu_reset_resume_helper(adev);
4682 /* some sw clean up VF needs to do before recover */
4683 amdgpu_virt_post_reset(adev);
4685 /* Resume IP prior to SMC */
4686 r = amdgpu_device_ip_reinit_early_sriov(adev);
4690 amdgpu_virt_init_data_exchange(adev);
4692 r = amdgpu_device_fw_loading(adev);
4696 /* now we are okay to resume SMC/CP/SDMA */
4697 r = amdgpu_device_ip_reinit_late_sriov(adev);
4701 hive = amdgpu_get_xgmi_hive(adev);
4702 /* Update PSP FW topology after reset */
4703 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4704 r = amdgpu_xgmi_update_topology(hive, adev);
4707 amdgpu_put_xgmi_hive(hive);
4710 r = amdgpu_ib_ring_tests(adev);
4712 amdgpu_amdkfd_post_reset(adev);
4716 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4717 amdgpu_inc_vram_lost(adev);
4718 r = amdgpu_device_recover_vram(adev);
4720 amdgpu_virt_release_full_gpu(adev, true);
4722 if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4723 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4727 DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4734 * amdgpu_device_has_job_running - check if there is any job in mirror list
4736 * @adev: amdgpu_device pointer
4738 * Check if there is any job in the mirror list.
4740 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4743 struct drm_sched_job *job;
4745 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4746 struct amdgpu_ring *ring = adev->rings[i];
4748 if (!ring || !ring->sched.thread)
4751 spin_lock(&ring->sched.job_list_lock);
4752 job = list_first_entry_or_null(&ring->sched.pending_list,
4753 struct drm_sched_job, list);
4754 spin_unlock(&ring->sched.job_list_lock);
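/*
 * Illustrative usage (sketch, mirroring the runtime suspend path in
 * amdgpu_drv.c): refuse to runtime suspend while work is still queued:
 *
 *	if (amdgpu_device_has_job_running(adev))
 *		return -EBUSY;
 */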
4762 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4764 * @adev: amdgpu_device pointer
4766 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4767 * the asic.
4769 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4772 if (amdgpu_gpu_recovery == 0)
4775 /* Skip soft reset check in fatal error mode */
4776 if (!amdgpu_ras_is_poison_mode_supported(adev))
4779 if (amdgpu_sriov_vf(adev))
4782 if (amdgpu_gpu_recovery == -1) {
4783 switch (adev->asic_type) {
4784 #ifdef CONFIG_DRM_AMDGPU_SI
4791 #ifdef CONFIG_DRM_AMDGPU_CIK
4798 case CHIP_CYAN_SKILLFISH:
4808 dev_info(adev->dev, "GPU recovery disabled.\n");
4812 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4817 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4819 dev_info(adev->dev, "GPU mode1 reset\n");
4822 pci_clear_master(adev->pdev);
4824 amdgpu_device_cache_pci_state(adev->pdev);
4826 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4827 dev_info(adev->dev, "GPU smu mode1 reset\n");
4828 ret = amdgpu_dpm_mode1_reset(adev);
4830 dev_info(adev->dev, "GPU psp mode1 reset\n");
4831 ret = psp_gpu_reset(adev);
4835 goto mode1_reset_failed;
4837 amdgpu_device_load_pci_state(adev->pdev);
4838 ret = amdgpu_psp_wait_for_bootloader(adev);
4840 goto mode1_reset_failed;
4842 /* wait for asic to come out of reset */
4843 for (i = 0; i < adev->usec_timeout; i++) {
4844 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4846 if (memsize != 0xffffffff)
4851 if (i >= adev->usec_timeout) {
4853 goto mode1_reset_failed;
4856 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4861 dev_err(adev->dev, "GPU mode1 reset failed\n");
4865 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4866 struct amdgpu_reset_context *reset_context)
4869 struct amdgpu_job *job = NULL;
4870 bool need_full_reset =
4871 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4873 if (reset_context->reset_req_dev == adev)
4874 job = reset_context->job;
4876 if (amdgpu_sriov_vf(adev)) {
4877 /* stop the data exchange thread */
4878 amdgpu_virt_fini_data_exchange(adev);
4881 amdgpu_fence_driver_isr_toggle(adev, true);
4883 /* block all schedulers and reset given job's ring */
4884 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4885 struct amdgpu_ring *ring = adev->rings[i];
4887 if (!ring || !ring->sched.thread)
4890 /* Clear job fences from the fence driver to avoid force_completion
4891 * on them; leave NULL and vm flush fences in the fence driver
4893 amdgpu_fence_driver_clear_job_fences(ring);
4895 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4896 amdgpu_fence_driver_force_completion(ring);
4899 amdgpu_fence_driver_isr_toggle(adev, false);
4902 drm_sched_increase_karma(&job->base);
4904 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4905 /* If reset handler not implemented, continue; otherwise return */
4906 if (r == -EOPNOTSUPP)
4911 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4912 if (!amdgpu_sriov_vf(adev)) {
4914 if (!need_full_reset)
4915 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4917 if (!need_full_reset && amdgpu_gpu_recovery &&
4918 amdgpu_device_ip_check_soft_reset(adev)) {
4919 amdgpu_device_ip_pre_soft_reset(adev);
4920 r = amdgpu_device_ip_soft_reset(adev);
4921 amdgpu_device_ip_post_soft_reset(adev);
4922 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4923 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4924 need_full_reset = true;
4928 if (need_full_reset)
4929 r = amdgpu_device_ip_suspend(adev);
4930 if (need_full_reset)
4931 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4933 clear_bit(AMDGPU_NEED_FULL_RESET,
4934 &reset_context->flags);
4940 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4944 lockdep_assert_held(&adev->reset_domain->sem);
4946 for (i = 0; i < adev->num_regs; i++) {
4947 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4948 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4949 adev->reset_dump_reg_value[i]);
4955 #ifndef CONFIG_DEV_COREDUMP
4956 static void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
4957 struct amdgpu_reset_context *reset_context)
4961 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4962 size_t count, void *data, size_t datalen)
4964 struct drm_printer p;
4965 struct amdgpu_coredump_info *coredump = data;
4966 struct drm_print_iterator iter;
4971 iter.start = offset;
4972 iter.remain = count;
4974 p = drm_coredump_printer(&iter);
4976 drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4977 drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4978 drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4979 drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec, coredump->reset_time.tv_nsec);
4980 if (coredump->reset_task_info.pid)
4981 drm_printf(&p, "process_name: %s PID: %d\n",
4982 coredump->reset_task_info.process_name,
4983 coredump->reset_task_info.pid);
4985 if (coredump->reset_vram_lost)
4986 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4987 if (coredump->adev->num_regs) {
4988 drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");
4990 for (i = 0; i < coredump->adev->num_regs; i++)
4991 drm_printf(&p, "0x%08x: 0x%08x\n",
4992 coredump->adev->reset_dump_reg_list[i],
4993 coredump->adev->reset_dump_reg_value[i]);
4996 return count - iter.remain;
4999 static void amdgpu_devcoredump_free(void *data)
5004 static void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
5005 struct amdgpu_reset_context *reset_context)
5007 struct amdgpu_coredump_info *coredump;
5008 struct drm_device *dev = adev_to_drm(adev);
5010 coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);
5013 DRM_ERROR("%s: failed to allocate memory for coredump\n", __func__);
5017 coredump->reset_vram_lost = vram_lost;
5019 if (reset_context->job && reset_context->job->vm)
5020 coredump->reset_task_info = reset_context->job->vm->task_info;
5022 coredump->adev = adev;
5024 ktime_get_ts64(&coredump->reset_time);
5026 dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT,
5027 amdgpu_devcoredump_read, amdgpu_devcoredump_free);
5031 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5032 struct amdgpu_reset_context *reset_context)
5034 struct amdgpu_device *tmp_adev = NULL;
5035 bool need_full_reset, skip_hw_reset, vram_lost = false;
5037 bool gpu_reset_for_dev_remove = false;
5039 /* Try reset handler method first */
5040 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5042 amdgpu_reset_reg_dumps(tmp_adev);
5044 reset_context->reset_device_list = device_list_handle;
5045 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
5046 /* If reset handler not implemented, continue; otherwise return */
5047 if (r == -EOPNOTSUPP)
5052 /* Reset handler not implemented, use the default method */
5054 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5055 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
5057 gpu_reset_for_dev_remove =
5058 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5059 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5062 * ASIC reset has to be done on all XGMI hive nodes ASAP
5063 * to allow proper link negotiation in FW (within 1 sec)
5065 if (!skip_hw_reset && need_full_reset) {
5066 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5067 /* For XGMI run all resets in parallel to speed up the process */
5068 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5069 tmp_adev->gmc.xgmi.pending_reset = false;
5070 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
5073 r = amdgpu_asic_reset(tmp_adev);
5076 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
5077 r, adev_to_drm(tmp_adev)->unique);
5082 /* For XGMI wait for all resets to complete before proceeding */
5084 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5085 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5086 flush_work(&tmp_adev->xgmi_reset_work);
5087 r = tmp_adev->asic_reset_res;
5095 if (!r && amdgpu_ras_intr_triggered()) {
5096 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5097 if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
5098 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
5099 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
5102 amdgpu_ras_intr_cleared();
5105 /* Since the mode1 reset affects base ip blocks, the
5106 * phase1 ip blocks need to be resumed. Otherwise there
5107 * will be a BIOS signature error and the psp bootloader
5108 * can't load kdb on the next amdgpu install.
5110 if (gpu_reset_for_dev_remove) {
5111 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
5112 amdgpu_device_ip_resume_phase1(tmp_adev);
5117 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5118 if (need_full_reset) {
5120 r = amdgpu_device_asic_init(tmp_adev);
5122 dev_warn(tmp_adev->dev, "asic atom init failed!");
5124 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5126 r = amdgpu_device_ip_resume_phase1(tmp_adev);
5130 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5132 amdgpu_coredump(tmp_adev, vram_lost, reset_context);
5135 DRM_INFO("VRAM is lost due to GPU reset!\n");
5136 amdgpu_inc_vram_lost(tmp_adev);
5139 r = amdgpu_device_fw_loading(tmp_adev);
5143 r = amdgpu_xcp_restore_partition_mode(
5148 r = amdgpu_device_ip_resume_phase2(tmp_adev);
5153 amdgpu_device_fill_reset_magic(tmp_adev);
5156 * Add this ASIC as tracked, as the reset already
5157 * completed successfully.
5159 amdgpu_register_gpu_instance(tmp_adev);
5161 if (!reset_context->hive &&
5162 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5163 amdgpu_xgmi_add_device(tmp_adev);
5165 r = amdgpu_device_ip_late_init(tmp_adev);
5169 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
5172 * The GPU enters a bad state once the number of faulty pages
5173 * detected by ECC reaches the threshold, and ras
5174 * recovery is scheduled next. So add one check
5175 * here to break recovery if it indeed exceeds the
5176 * bad page threshold, and remind the user to
5177 * retire this GPU or set a bigger
5178 * bad_page_threshold value to work around this the
5179 * next time the driver is probed.
5181 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
5183 amdgpu_ras_resume(tmp_adev);
5189 /* Update PSP FW topology after reset */
5190 if (reset_context->hive &&
5191 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5192 r = amdgpu_xgmi_update_topology(
5193 reset_context->hive, tmp_adev);
5199 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5200 r = amdgpu_ib_ring_tests(tmp_adev);
5202 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5203 need_full_reset = true;
5210 r = amdgpu_device_recover_vram(tmp_adev);
5212 tmp_adev->asic_reset_res = r;
5216 if (need_full_reset)
5217 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5219 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
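/*
 * Illustrative sketch: amdgpu_do_asic_reset() expects a populated reset
 * context and a device list. The PCI slot reset path below builds them
 * roughly like this for a single (non-XGMI) device:
 *
 *	struct amdgpu_reset_context reset_context;
 *	struct list_head device_list;
 *
 *	memset(&reset_context, 0, sizeof(reset_context));
 *	INIT_LIST_HEAD(&device_list);
 *	list_add_tail(&adev->reset_list, &device_list);
 *	reset_context.method = AMD_RESET_METHOD_NONE;
 *	reset_context.reset_req_dev = adev;
 *	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
 *	r = amdgpu_do_asic_reset(&device_list, &reset_context);
 */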
5223 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5226 switch (amdgpu_asic_reset_method(adev)) {
5227 case AMD_RESET_METHOD_MODE1:
5228 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5230 case AMD_RESET_METHOD_MODE2:
5231 adev->mp1_state = PP_MP1_STATE_RESET;
5234 adev->mp1_state = PP_MP1_STATE_NONE;
5239 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5241 amdgpu_vf_error_trans_all(adev);
5242 adev->mp1_state = PP_MP1_STATE_NONE;
5245 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5247 struct pci_dev *p = NULL;
5249 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5250 adev->pdev->bus->number, 1);
5252 pm_runtime_enable(&(p->dev));
5253 pm_runtime_resume(&(p->dev));
5259 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5261 enum amd_reset_method reset_method;
5262 struct pci_dev *p = NULL;
5266 * For now, only BACO and mode1 reset are confirmed
5267 * to suffer the audio issue if not properly suspended.
5269 reset_method = amdgpu_asic_reset_method(adev);
5270 if ((reset_method != AMD_RESET_METHOD_BACO) &&
5271 (reset_method != AMD_RESET_METHOD_MODE1))
5274 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5275 adev->pdev->bus->number, 1);
5279 expires = pm_runtime_autosuspend_expiration(&(p->dev));
5282 * If we cannot get the audio device autosuspend delay,
5283 * a fixed 4S interval will be used. Since 3S is
5284 * the audio controller's default autosuspend delay setting,
5285 * the 4S used here is guaranteed to cover it.
5287 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5289 while (!pm_runtime_status_suspended(&(p->dev))) {
5290 if (!pm_runtime_suspend(&(p->dev)))
5293 if (expires < ktime_get_mono_fast_ns()) {
5294 dev_warn(adev->dev, "failed to suspend display audio\n");
5296 /* TODO: abort the succeeding gpu reset? */
5301 pm_runtime_disable(&(p->dev));
5307 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5309 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5311 #if defined(CONFIG_DEBUG_FS)
5312 if (!amdgpu_sriov_vf(adev))
5313 cancel_work(&adev->reset_work);
5317 cancel_work(&adev->kfd.reset_work);
5319 if (amdgpu_sriov_vf(adev))
5320 cancel_work(&adev->virt.flr_work);
5322 if (con && adev->ras_enabled)
5323 cancel_work(&con->recovery_work);
5328 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5330 * @adev: amdgpu_device pointer
5331 * @job: which job triggered the hang
5332 * @reset_context: amdgpu reset context pointer
5334 * Attempt to reset the GPU if it has hung (all asics).
5335 * Attempt to do soft-reset or full-reset and reinitialize the ASIC.
5336 * Returns 0 for success or an error on failure.
5339 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5340 struct amdgpu_job *job,
5341 struct amdgpu_reset_context *reset_context)
5343 struct list_head device_list, *device_list_handle = NULL;
5344 bool job_signaled = false;
5345 struct amdgpu_hive_info *hive = NULL;
5346 struct amdgpu_device *tmp_adev = NULL;
5348 bool need_emergency_restart = false;
5349 bool audio_suspended = false;
5350 bool gpu_reset_for_dev_remove = false;
5352 gpu_reset_for_dev_remove =
5353 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5354 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5357 * Special case: RAS triggered and full reset isn't supported
5359 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5362 * Flush RAM to disk so that after reboot
5363 * the user can read the log and see why the system rebooted.
5365 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5366 DRM_WARN("Emergency reboot.");
5369 emergency_restart();
5372 dev_info(adev->dev, "GPU %s begin!\n",
5373 need_emergency_restart ? "jobs stop" : "reset");
5375 if (!amdgpu_sriov_vf(adev))
5376 hive = amdgpu_get_xgmi_hive(adev);
5378 mutex_lock(&hive->hive_lock);
5380 reset_context->job = job;
5381 reset_context->hive = hive;
5383 * Build list of devices to reset.
5384 * In case we are in XGMI hive mode, resort the device list
5385 * to put adev in the 1st position.
5387 INIT_LIST_HEAD(&device_list);
5388 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5389 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5390 list_add_tail(&tmp_adev->reset_list, &device_list);
5391 if (gpu_reset_for_dev_remove && adev->shutdown)
5392 tmp_adev->shutdown = true;
5394 if (!list_is_first(&adev->reset_list, &device_list))
5395 list_rotate_to_front(&adev->reset_list, &device_list);
5396 device_list_handle = &device_list;
5398 list_add_tail(&adev->reset_list, &device_list);
5399 device_list_handle = &device_list;
5402 /* We need to lock reset domain only once both for XGMI and single device */
5403 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5405 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5407 /* block all schedulers and reset given job's ring */
5408 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5410 amdgpu_device_set_mp1_state(tmp_adev);
5413 * Try to put the audio codec into suspend state
5414 * before the gpu reset starts.
5416 * The power domain of the graphics device
5417 * is shared with the AZ power domain. Without this,
5418 * we may change the audio hardware from behind
5419 * the audio driver's back, which will trigger
5420 * some audio codec errors.
5422 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5423 audio_suspended = true;
5425 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5427 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5429 if (!amdgpu_sriov_vf(tmp_adev))
5430 amdgpu_amdkfd_pre_reset(tmp_adev);
5433 * Mark these ASICs to be reset as untracked first,
5434 * and add them back after the reset completes
5436 amdgpu_unregister_gpu_instance(tmp_adev);
5438 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5440 /* disable ras on ALL IPs */
5441 if (!need_emergency_restart &&
5442 amdgpu_device_ip_need_full_reset(tmp_adev))
5443 amdgpu_ras_suspend(tmp_adev);
5445 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5446 struct amdgpu_ring *ring = tmp_adev->rings[i];
5448 if (!ring || !ring->sched.thread)
5451 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5453 if (need_emergency_restart)
5454 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5456 atomic_inc(&tmp_adev->gpu_reset_counter);
5459 if (need_emergency_restart)
5460 goto skip_sched_resume;
5463 * Must check guilty signal here since after this point all old
5464 * HW fences are force signaled.
5466 * job->base holds a reference to the parent fence
5468 if (job && dma_fence_is_signaled(&job->hw_fence)) {
5469 job_signaled = true;
5470 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5474 retry: /* Rest of adevs pre asic reset from XGMI hive. */
5475 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5476 if (gpu_reset_for_dev_remove) {
5477 /* Workaround for ASICs that need to disable SMC first */
5478 amdgpu_device_smu_fini_early(tmp_adev);
5480 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5481 /* TODO: should we stop? */
5483 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5484 r, adev_to_drm(tmp_adev)->unique);
5485 tmp_adev->asic_reset_res = r;
5489 * Drop all pending non scheduler resets. Scheduler resets
5490 * were already dropped during drm_sched_stop
5492 amdgpu_device_stop_pending_resets(tmp_adev);
5495 /* Actual ASIC resets if needed. */
5496 /* Host driver will handle XGMI hive reset for SRIOV */
5497 if (amdgpu_sriov_vf(adev)) {
5498 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5500 adev->asic_reset_res = r;
5502 /* Aldebaran and gfx_11_0_3 support ras in SRIOV, so we need to resume ras during reset */
5503 if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
5504 IP_VERSION(9, 4, 2) ||
5505 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
5506 amdgpu_ras_resume(adev);
5508 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5509 if (r && r == -EAGAIN)
5512 if (!r && gpu_reset_for_dev_remove)
5518 /* Post ASIC reset for all devs. */
5519 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5521 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5522 struct amdgpu_ring *ring = tmp_adev->rings[i];
5524 if (!ring || !ring->sched.thread)
5527 drm_sched_start(&ring->sched, true);
5530 if (adev->enable_mes &&
5531 amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3))
5532 amdgpu_mes_self_test(tmp_adev);
5534 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
5535 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5537 if (tmp_adev->asic_reset_res)
5538 r = tmp_adev->asic_reset_res;
5540 tmp_adev->asic_reset_res = 0;
5543 /* bad news, how to tell it to userspace? */
5544 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5545 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5547 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5548 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5549 DRM_WARN("smart shift update failed\n");
5554 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5555 /* unlock kfd: SRIOV would do it separately */
5556 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5557 amdgpu_amdkfd_post_reset(tmp_adev);
5559 /* kfd_post_reset will do nothing if the kfd device is not initialized;
5560 * we need to bring up kfd here if it was not initialized before
5562 if (!adev->kfd.init_complete)
5563 amdgpu_amdkfd_device_init(adev);
5565 if (audio_suspended)
5566 amdgpu_device_resume_display_audio(tmp_adev);
5568 amdgpu_device_unset_mp1_state(tmp_adev);
5570 amdgpu_ras_set_error_query_ready(tmp_adev, true);
5574 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5576 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5579 mutex_unlock(&hive->hive_lock);
5580 amdgpu_put_xgmi_hive(hive);
5584 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5586 atomic_set(&adev->reset_domain->reset_res, r);
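/*
 * Illustrative sketch (assumption): a job timeout handler would enter
 * recovery roughly as follows; the real entry points schedule this work
 * through the reset domain rather than calling it inline:
 *
 *	struct amdgpu_reset_context reset_context;
 *
 *	memset(&reset_context, 0, sizeof(reset_context));
 *	reset_context.method = AMD_RESET_METHOD_NONE;
 *	reset_context.reset_req_dev = adev;
 *	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
 *	r = amdgpu_device_gpu_recover(adev, job, &reset_context);
 */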
5591 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5593 * @adev: amdgpu_device pointer
5595 * Fetches and stores in the driver the PCIE capabilities (gen speed
5596 * and lanes) of the slot the device is in. Handles APUs and
5597 * virtualized environments where PCIE config space may not be available.
5599 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5601 struct pci_dev *pdev;
5602 enum pci_bus_speed speed_cap, platform_speed_cap;
5603 enum pcie_link_width platform_link_width;
5605 if (amdgpu_pcie_gen_cap)
5606 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5608 if (amdgpu_pcie_lane_cap)
5609 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5611 /* covers APUs as well */
5612 if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
5613 if (adev->pm.pcie_gen_mask == 0)
5614 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5615 if (adev->pm.pcie_mlw_mask == 0)
5616 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5620 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5623 pcie_bandwidth_available(adev->pdev, NULL,
5624 &platform_speed_cap, &platform_link_width);
5626 if (adev->pm.pcie_gen_mask == 0) {
5629 speed_cap = pcie_get_speed_cap(pdev);
5630 if (speed_cap == PCI_SPEED_UNKNOWN) {
5631 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5632 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5633 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5635 if (speed_cap == PCIE_SPEED_32_0GT)
5636 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5637 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5638 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5639 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5640 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5641 else if (speed_cap == PCIE_SPEED_16_0GT)
5642 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5643 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5644 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5645 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5646 else if (speed_cap == PCIE_SPEED_8_0GT)
5647 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5648 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5649 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5650 else if (speed_cap == PCIE_SPEED_5_0GT)
5651 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5652 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5654 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5657 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5658 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5659 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5661 if (platform_speed_cap == PCIE_SPEED_32_0GT)
5662 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5663 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5664 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5665 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5666 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5667 else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5668 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5669 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5670 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5671 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5672 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5673 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5674 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5675 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5676 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5677 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5678 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5680 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5684 if (adev->pm.pcie_mlw_mask == 0) {
5685 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5686 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5688 switch (platform_link_width) {
5690 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5691 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5692 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5693 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5694 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5695 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5696 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5699 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5700 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5701 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5702 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5703 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5704 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5707 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5708 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5709 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5710 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5711 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5714 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5715 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5716 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5717 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5720 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5721 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5722 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5725 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5726 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5729 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5739 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5741 * @adev: amdgpu_device pointer
5742 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5744 * Return true if @peer_adev can access (DMA) @adev through the PCIe
5745 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5746 * the peer device.
5748 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5749 struct amdgpu_device *peer_adev)
5751 #ifdef CONFIG_HSA_AMD_P2P
5752 uint64_t address_mask = peer_adev->dev->dma_mask ?
5753 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5754 resource_size_t aper_limit =
5755 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5757 !adev->gmc.xgmi.connected_to_cpu &&
5758 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5760 return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5761 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5762 !(adev->gmc.aper_base & address_mask ||
5763 aper_limit & address_mask));
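/*
 * Illustrative usage (sketch): peer-to-peer mappings should be gated on
 * this check in both directions, since each device must be able to reach
 * the other's BAR:
 *
 *	bool can_p2p = amdgpu_device_is_peer_accessible(adev, peer_adev) &&
 *		       amdgpu_device_is_peer_accessible(peer_adev, adev);
 */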
5769 int amdgpu_device_baco_enter(struct drm_device *dev)
5771 struct amdgpu_device *adev = drm_to_adev(dev);
5772 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5774 if (!amdgpu_device_supports_baco(dev))
5777 if (ras && adev->ras_enabled &&
5778 adev->nbio.funcs->enable_doorbell_interrupt)
5779 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5781 return amdgpu_dpm_baco_enter(adev);
5784 int amdgpu_device_baco_exit(struct drm_device *dev)
5786 struct amdgpu_device *adev = drm_to_adev(dev);
5787 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5790 if (!amdgpu_device_supports_baco(dev))
5793 ret = amdgpu_dpm_baco_exit(adev);
5797 if (ras && adev->ras_enabled &&
5798 adev->nbio.funcs->enable_doorbell_interrupt)
5799 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5801 if (amdgpu_passthrough(adev) &&
5802 adev->nbio.funcs->clear_doorbell_interrupt)
5803 adev->nbio.funcs->clear_doorbell_interrupt(adev);
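/*
 * Illustrative sketch (assumption): a runtime PM implementation on a
 * BACO-capable board brackets the power-off with these two helpers,
 * entering BACO on suspend and leaving it on resume:
 *
 *	r = amdgpu_device_baco_enter(drm_dev);
 *	if (r)
 *		return r;
 *
 *	.. device sleeps in BACO until wakeup ..
 *
 *	r = amdgpu_device_baco_exit(drm_dev);
 */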
5809 * amdgpu_pci_error_detected - Called when a PCI error is detected.
5810 * @pdev: PCI device struct
5811 * @state: PCI channel state
5813 * Description: Called when a PCI error is detected.
5815 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5817 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5819 struct drm_device *dev = pci_get_drvdata(pdev);
5820 struct amdgpu_device *adev = drm_to_adev(dev);
5823 DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5825 if (adev->gmc.xgmi.num_physical_nodes > 1) {
5826 DRM_WARN("No support for XGMI hive yet...");
5827 return PCI_ERS_RESULT_DISCONNECT;
5830 adev->pci_channel_state = state;
5833 case pci_channel_io_normal:
5834 return PCI_ERS_RESULT_CAN_RECOVER;
5835 /* Fatal error, prepare for slot reset */
5836 case pci_channel_io_frozen:
5838 * Locking adev->reset_domain->sem will prevent any external access
5839 * to GPU during PCI error recovery
5841 amdgpu_device_lock_reset_domain(adev->reset_domain);
5842 amdgpu_device_set_mp1_state(adev);
5845 * Block any work scheduling as we do for regular GPU reset
5846 * for the duration of the recovery
5848 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5849 struct amdgpu_ring *ring = adev->rings[i];
5851 if (!ring || !ring->sched.thread)
5854 drm_sched_stop(&ring->sched, NULL);
5856 atomic_inc(&adev->gpu_reset_counter);
5857 return PCI_ERS_RESULT_NEED_RESET;
5858 case pci_channel_io_perm_failure:
5859 /* Permanent error, prepare for device removal */
5860 return PCI_ERS_RESULT_DISCONNECT;
5863 return PCI_ERS_RESULT_NEED_RESET;
5867 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5868 * @pdev: pointer to PCI device
5870 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5873 DRM_INFO("PCI error: mmio enabled callback!!\n");
5875 /* TODO - dump whatever for debugging purposes */
5877 /* This is called only if amdgpu_pci_error_detected returns
5878 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5879 * works, no need to reset slot.
5882 return PCI_ERS_RESULT_RECOVERED;
5886 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5887 * @pdev: PCI device struct
5889 * Description: This routine is called by the pci error recovery
5890 * code after the PCI slot has been reset, just before we
5891 * should resume normal operations.
5893 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5895 struct drm_device *dev = pci_get_drvdata(pdev);
5896 struct amdgpu_device *adev = drm_to_adev(dev);
5898 struct amdgpu_reset_context reset_context;
5900 struct list_head device_list;
5902 DRM_INFO("PCI error: slot reset callback!!\n");
5904 memset(&reset_context, 0, sizeof(reset_context));
5906 INIT_LIST_HEAD(&device_list);
5907 list_add_tail(&adev->reset_list, &device_list);
5909 /* wait for asic to come out of reset */
5912 /* Restore PCI confspace */
5913 amdgpu_device_load_pci_state(pdev);
5915 /* confirm ASIC came out of reset */
5916 for (i = 0; i < adev->usec_timeout; i++) {
5917 memsize = amdgpu_asic_get_config_memsize(adev);
5919 if (memsize != 0xffffffff)
5923 if (memsize == 0xffffffff) {
5928 reset_context.method = AMD_RESET_METHOD_NONE;
5929 reset_context.reset_req_dev = adev;
5930 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5931 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5933 adev->no_hw_access = true;
5934 r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5935 adev->no_hw_access = false;
5939 r = amdgpu_do_asic_reset(&device_list, &reset_context);
5943 if (amdgpu_device_cache_pci_state(adev->pdev))
5944 pci_restore_state(adev->pdev);
5946 DRM_INFO("PCIe error recovery succeeded\n");
5948 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5949 amdgpu_device_unset_mp1_state(adev);
5950 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5953 return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5957 * amdgpu_pci_resume() - resume normal ops after PCI reset
5958 * @pdev: pointer to PCI device
5960 * Called when the error recovery driver tells us that it's
5961 * OK to resume normal operation.
5963 void amdgpu_pci_resume(struct pci_dev *pdev)
5965 struct drm_device *dev = pci_get_drvdata(pdev);
5966 struct amdgpu_device *adev = drm_to_adev(dev);
5970 DRM_INFO("PCI error: resume callback!!\n");
5972 /* Only continue execution for the case of pci_channel_io_frozen */
5973 if (adev->pci_channel_state != pci_channel_io_frozen)
5976 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5977 struct amdgpu_ring *ring = adev->rings[i];
5979 if (!ring || !ring->sched.thread)
5982 drm_sched_start(&ring->sched, true);
5985 amdgpu_device_unset_mp1_state(adev);
5986 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5989 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5991 struct drm_device *dev = pci_get_drvdata(pdev);
5992 struct amdgpu_device *adev = drm_to_adev(dev);
5995 r = pci_save_state(pdev);
5997 kfree(adev->pci_state);
5999 adev->pci_state = pci_store_saved_state(pdev);
6001 if (!adev->pci_state) {
6002 DRM_ERROR("Failed to store PCI saved state");
6006 DRM_WARN("Failed to save PCI state, err:%d\n", r);
6013 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
6015 struct drm_device *dev = pci_get_drvdata(pdev);
6016 struct amdgpu_device *adev = drm_to_adev(dev);
6019 if (!adev->pci_state)
6022 r = pci_load_saved_state(pdev, adev->pci_state);
6025 pci_restore_state(pdev);
6027 DRM_WARN("Failed to load PCI state, err:%d\n", r);
6034 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6035 struct amdgpu_ring *ring)
6037 #ifdef CONFIG_X86_64
6038 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6041 if (adev->gmc.xgmi.connected_to_cpu)
6044 if (ring && ring->funcs->emit_hdp_flush)
6045 amdgpu_ring_emit_hdp_flush(ring);
6047 amdgpu_asic_flush_hdp(adev, ring);
6050 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6051 struct amdgpu_ring *ring)
6053 #ifdef CONFIG_X86_64
6054 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6057 if (adev->gmc.xgmi.connected_to_cpu)
6060 amdgpu_asic_invalidate_hdp(adev, ring);
6063 int amdgpu_in_reset(struct amdgpu_device *adev)
6065 return atomic_read(&adev->reset_domain->in_gpu_reset);
6069 * amdgpu_device_halt() - bring hardware to some kind of halt state
6071 * @adev: amdgpu_device pointer
6073 * Bring the hardware to some kind of halt state so that no one can touch it
6074 * any more. This helps to preserve the error context when an error occurs.
6075 * Compared to a simple hang, the system will stay stable at least for SSH
6076 * access. It should then be trivial to inspect the hardware state and
6077 * see what's going on. Implemented as follows:
6079 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc),
6080 * clears all CPU mappings to the device, disallows remappings through page faults
6081 * 2. amdgpu_irq_disable_all() disables all interrupts
6082 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
6083 * 4. set adev->no_hw_access to avoid potential crashes after step 5
6084 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
6085 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
6086 * flush any in flight DMA operations
6088 void amdgpu_device_halt(struct amdgpu_device *adev)
6090 struct pci_dev *pdev = adev->pdev;
6091 struct drm_device *ddev = adev_to_drm(adev);
6093 amdgpu_xcp_dev_unplug(adev);
6094 drm_dev_unplug(ddev);
6096 amdgpu_irq_disable_all(adev);
6098 amdgpu_fence_driver_hw_fini(adev);
6100 adev->no_hw_access = true;
6102 amdgpu_device_unmap_mmio(adev);
6104 pci_disable_device(pdev);
6105 pci_wait_for_pending_transaction(pdev);
6108 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
6111 unsigned long flags, address, data;
6114 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6115 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6117 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6118 WREG32(address, reg * 4);
6119 (void)RREG32(address);
6121 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6125 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6128 unsigned long flags, address, data;
6130 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6131 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6133 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6134 WREG32(address, reg * 4);
6135 (void)RREG32(address);
6138 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
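/*
 * Illustrative sketch: the two port accessors above pair up for a
 * read-modify-write of an indexed PCIE port register. The register offset
 * and bit below are placeholders (assumptions):
 *
 *	u32 tmp = amdgpu_device_pcie_port_rreg(adev, example_reg);
 *
 *	tmp |= EXAMPLE_ENABLE_BIT;
 *	amdgpu_device_pcie_port_wreg(adev, example_reg, tmp);
 */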
6142 * amdgpu_device_switch_gang - switch to a new gang
6143 * @adev: amdgpu_device pointer
6144 * @gang: the gang to switch to
6146 * Try to switch to a new gang.
6147 * Returns: NULL if we switched to the new gang or a reference to the current
6148 * gang leader.
6150 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6151 struct dma_fence *gang)
6153 struct dma_fence *old = NULL;
6158 old = dma_fence_get_rcu_safe(&adev->gang_submit);
6164 if (!dma_fence_is_signaled(old))
6167 } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
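/*
 * Illustrative caller pattern (sketch): a non-NULL return means the
 * current gang has not finished yet, so a submitter can wait on it and
 * retry until the switch succeeds:
 *
 *	struct dma_fence *old;
 *
 *	while ((old = amdgpu_device_switch_gang(adev, gang))) {
 *		dma_fence_wait(old, false);
 *		dma_fence_put(old);
 *	}
 */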
6174 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6176 switch (adev->asic_type) {
6177 #ifdef CONFIG_DRM_AMDGPU_SI
6181 /* chips with no display hardware */
6183 #ifdef CONFIG_DRM_AMDGPU_SI
6189 #ifdef CONFIG_DRM_AMDGPU_CIK
6198 case CHIP_POLARIS10:
6199 case CHIP_POLARIS11:
6200 case CHIP_POLARIS12:
6204 /* chips with display hardware */
6208 if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
6209 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6215 uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
6216 uint32_t inst, uint32_t reg_addr, char reg_name[],
6217 uint32_t expected_value, uint32_t mask)
6221 uint32_t tmp_ = RREG32(reg_addr);
6222 uint32_t loop = adev->usec_timeout;
6224 while ((tmp_ & (mask)) != (expected_value)) {
6226 loop = adev->usec_timeout;
6230 tmp_ = RREG32(reg_addr);
6233 DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn",
6234 inst, reg_name, (uint32_t)expected_value,
6235 (uint32_t)(tmp_ & (mask)));
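/*
 * Illustrative usage (sketch): poll a status register until a ready bit is
 * set; the register offset, name and mask are placeholders (assumptions):
 *
 *	u32 val = amdgpu_device_wait_on_rreg(adev, 0, example_status_reg,
 *					     "EXAMPLE_STATUS",
 *					     EXAMPLE_READY_BIT,
 *					     EXAMPLE_READY_BIT);
 */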