2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/kthread.h>
29 #include <linux/console.h>
30 #include <linux/slab.h>
32 #include <drm/drm_crtc_helper.h>
33 #include <drm/drm_atomic_helper.h>
34 #include <drm/amdgpu_drm.h>
35 #include <linux/vgaarb.h>
36 #include <linux/vga_switcheroo.h>
37 #include <linux/efi.h>
39 #include "amdgpu_trace.h"
40 #include "amdgpu_i2c.h"
42 #include "amdgpu_atombios.h"
43 #include "amdgpu_atomfirmware.h"
45 #ifdef CONFIG_DRM_AMDGPU_SI
48 #ifdef CONFIG_DRM_AMDGPU_CIK
53 #include "bif/bif_4_1_d.h"
54 #include <linux/pci.h>
55 #include <linux/firmware.h>
56 #include "amdgpu_vf_error.h"
58 #include "amdgpu_amdkfd.h"
59 #include "amdgpu_pm.h"
61 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
62 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
64 #define AMDGPU_RESUME_MS 2000
66 static const char *amdgpu_asic_name[] = {
90 bool amdgpu_device_is_px(struct drm_device *dev)
92 struct amdgpu_device *adev = dev->dev_private;
94 if (adev->flags & AMD_IS_PX)
100 * MMIO register access helper functions.
102 uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
107 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
108 return amdgpu_virt_kiq_rreg(adev, reg);
110 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
111 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
115 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
116 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
117 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
118 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
120 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
124 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
127 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
129 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
130 adev->last_mm_index = v;
133 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
134 return amdgpu_virt_kiq_wreg(adev, reg, v);
136 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
137 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
141 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
142 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
143 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
144 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
147 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
152 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
154 if ((reg * 4) < adev->rio_mem_size)
155 return ioread32(adev->rio_mem + (reg * 4));
157 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
158 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
162 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
164 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
165 adev->last_mm_index = v;
168 if ((reg * 4) < adev->rio_mem_size)
169 iowrite32(v, adev->rio_mem + (reg * 4));
171 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
172 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
175 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
181 * amdgpu_mm_rdoorbell - read a doorbell dword
183 * @adev: amdgpu_device pointer
184 * @index: doorbell index
186 * Returns the value in the doorbell aperture at the
187 * requested doorbell index (CIK).
189 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
191 if (index < adev->doorbell.num_doorbells) {
192 return readl(adev->doorbell.ptr + index);
194 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
200 * amdgpu_mm_wdoorbell - write a doorbell dword
202 * @adev: amdgpu_device pointer
203 * @index: doorbell index
206 * Writes @v to the doorbell aperture at the
207 * requested doorbell index (CIK).
209 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
211 if (index < adev->doorbell.num_doorbells) {
212 writel(v, adev->doorbell.ptr + index);
214 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
219 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
221 * @adev: amdgpu_device pointer
222 * @index: doorbell index
224 * Returns the value in the doorbell aperture at the
225 * requested doorbell index (VEGA10+).
227 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
229 if (index < adev->doorbell.num_doorbells) {
230 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
232 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
238 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
240 * @adev: amdgpu_device pointer
241 * @index: doorbell index
244 * Writes @v to the doorbell aperture at the
245 * requested doorbell index (VEGA10+).
247 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
249 if (index < adev->doorbell.num_doorbells) {
250 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
252 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
257 * amdgpu_invalid_rreg - dummy reg read function
259 * @adev: amdgpu device pointer
260 * @reg: offset of register
262 * Dummy register read function. Used for register blocks
263 * that certain asics don't have (all asics).
264 * Returns the value in the register.
266 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
268 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
274 * amdgpu_invalid_wreg - dummy reg write function
276 * @adev: amdgpu device pointer
277 * @reg: offset of register
278 * @v: value to write to the register
280 * Dummy register read function. Used for register blocks
281 * that certain asics don't have (all asics).
283 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
285 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
291 * amdgpu_block_invalid_rreg - dummy reg read function
293 * @adev: amdgpu device pointer
294 * @block: offset of instance
295 * @reg: offset of register
297 * Dummy register read function. Used for register blocks
298 * that certain asics don't have (all asics).
299 * Returns the value in the register.
301 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
302 uint32_t block, uint32_t reg)
304 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
311 * amdgpu_block_invalid_wreg - dummy reg write function
313 * @adev: amdgpu device pointer
314 * @block: offset of instance
315 * @reg: offset of register
316 * @v: value to write to the register
318 * Dummy register read function. Used for register blocks
319 * that certain asics don't have (all asics).
321 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
323 uint32_t reg, uint32_t v)
325 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
330 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
332 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
333 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
334 &adev->vram_scratch.robj,
335 &adev->vram_scratch.gpu_addr,
336 (void **)&adev->vram_scratch.ptr);
339 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
341 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
345 * amdgpu_device_program_register_sequence - program an array of registers.
347 * @adev: amdgpu_device pointer
348 * @registers: pointer to the register array
349 * @array_size: size of the register array
351 * Programs an array or registers with and and or masks.
352 * This is a helper for setting golden registers.
354 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
355 const u32 *registers,
356 const u32 array_size)
358 u32 tmp, reg, and_mask, or_mask;
364 for (i = 0; i < array_size; i +=3) {
365 reg = registers[i + 0];
366 and_mask = registers[i + 1];
367 or_mask = registers[i + 2];
369 if (and_mask == 0xffffffff) {
380 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
382 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
386 * GPU doorbell aperture helpers function.
389 * amdgpu_device_doorbell_init - Init doorbell driver information.
391 * @adev: amdgpu_device pointer
393 * Init doorbell driver information (CIK)
394 * Returns 0 on success, error on failure.
396 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
398 /* No doorbell on SI hardware generation */
399 if (adev->asic_type < CHIP_BONAIRE) {
400 adev->doorbell.base = 0;
401 adev->doorbell.size = 0;
402 adev->doorbell.num_doorbells = 0;
403 adev->doorbell.ptr = NULL;
407 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
410 /* doorbell bar mapping */
411 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
412 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
414 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
415 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
416 if (adev->doorbell.num_doorbells == 0)
419 adev->doorbell.ptr = ioremap(adev->doorbell.base,
420 adev->doorbell.num_doorbells *
422 if (adev->doorbell.ptr == NULL)
429 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
431 * @adev: amdgpu_device pointer
433 * Tear down doorbell driver information (CIK)
435 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
437 iounmap(adev->doorbell.ptr);
438 adev->doorbell.ptr = NULL;
444 * amdgpu_device_wb_*()
445 * Writeback is the method by which the GPU updates special pages in memory
446 * with the status of certain GPU events (fences, ring pointers,etc.).
450 * amdgpu_device_wb_fini - Disable Writeback and free memory
452 * @adev: amdgpu_device pointer
454 * Disables Writeback and frees the Writeback memory (all asics).
455 * Used at driver shutdown.
457 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
459 if (adev->wb.wb_obj) {
460 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
462 (void **)&adev->wb.wb);
463 adev->wb.wb_obj = NULL;
468 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
470 * @adev: amdgpu_device pointer
472 * Initializes writeback and allocates writeback memory (all asics).
473 * Used at driver startup.
474 * Returns 0 on success or an -error on failure.
476 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
480 if (adev->wb.wb_obj == NULL) {
481 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
482 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
483 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
484 &adev->wb.wb_obj, &adev->wb.gpu_addr,
485 (void **)&adev->wb.wb);
487 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
491 adev->wb.num_wb = AMDGPU_MAX_WB;
492 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
494 /* clear wb memory */
495 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
502 * amdgpu_device_wb_get - Allocate a wb entry
504 * @adev: amdgpu_device pointer
507 * Allocate a wb slot for use by the driver (all asics).
508 * Returns 0 on success or -EINVAL on failure.
510 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
512 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
514 if (offset < adev->wb.num_wb) {
515 __set_bit(offset, adev->wb.used);
516 *wb = offset << 3; /* convert to dw offset */
524 * amdgpu_device_wb_free - Free a wb entry
526 * @adev: amdgpu_device pointer
529 * Free a wb slot allocated for use by the driver (all asics)
531 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
533 if (wb < adev->wb.num_wb)
534 __clear_bit(wb >> 3, adev->wb.used);
538 * amdgpu_vram_location - try to find VRAM location
539 * @adev: amdgpu device structure holding all necessary informations
540 * @mc: memory controller structure holding memory informations
541 * @base: base address at which to put VRAM
543 * Function will try to place VRAM at base address provided
546 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
548 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
550 mc->vram_start = base;
551 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
552 if (limit && limit < mc->real_vram_size)
553 mc->real_vram_size = limit;
554 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
555 mc->mc_vram_size >> 20, mc->vram_start,
556 mc->vram_end, mc->real_vram_size >> 20);
560 * amdgpu_gart_location - try to find GTT location
561 * @adev: amdgpu device structure holding all necessary informations
562 * @mc: memory controller structure holding memory informations
564 * Function will place try to place GTT before or after VRAM.
566 * If GTT size is bigger than space left then we ajust GTT size.
567 * Thus function will never fails.
569 * FIXME: when reducing GTT size align new size on power of 2.
571 void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
573 u64 size_af, size_bf;
575 size_af = adev->mc.mc_mask - mc->vram_end;
576 size_bf = mc->vram_start;
577 if (size_bf > size_af) {
578 if (mc->gart_size > size_bf) {
579 dev_warn(adev->dev, "limiting GTT\n");
580 mc->gart_size = size_bf;
584 if (mc->gart_size > size_af) {
585 dev_warn(adev->dev, "limiting GTT\n");
586 mc->gart_size = size_af;
588 /* VCE doesn't like it when BOs cross a 4GB segment, so align
589 * the GART base on a 4GB boundary as well.
591 mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
593 mc->gart_end = mc->gart_start + mc->gart_size - 1;
594 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
595 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
599 * Firmware Reservation functions
602 * amdgpu_fw_reserve_vram_fini - free fw reserved vram
604 * @adev: amdgpu_device pointer
606 * free fw reserved vram if it has been reserved.
608 void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
610 amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
611 NULL, &adev->fw_vram_usage.va);
615 * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
617 * @adev: amdgpu_device pointer
619 * create bo vram reservation from fw.
621 int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
623 struct ttm_operation_ctx ctx = { false, false };
626 u64 vram_size = adev->mc.visible_vram_size;
627 u64 offset = adev->fw_vram_usage.start_offset;
628 u64 size = adev->fw_vram_usage.size;
629 struct amdgpu_bo *bo;
631 adev->fw_vram_usage.va = NULL;
632 adev->fw_vram_usage.reserved_bo = NULL;
634 if (adev->fw_vram_usage.size > 0 &&
635 adev->fw_vram_usage.size <= vram_size) {
637 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
638 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
639 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
640 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
641 &adev->fw_vram_usage.reserved_bo);
645 r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
649 /* remove the original mem node and create a new one at the
652 bo = adev->fw_vram_usage.reserved_bo;
653 offset = ALIGN(offset, PAGE_SIZE);
654 for (i = 0; i < bo->placement.num_placement; ++i) {
655 bo->placements[i].fpfn = offset >> PAGE_SHIFT;
656 bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
659 ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
660 r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
665 r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
666 AMDGPU_GEM_DOMAIN_VRAM,
667 adev->fw_vram_usage.start_offset,
668 (adev->fw_vram_usage.start_offset +
669 adev->fw_vram_usage.size), NULL);
672 r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
673 &adev->fw_vram_usage.va);
677 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
682 amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
684 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
686 amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
688 adev->fw_vram_usage.va = NULL;
689 adev->fw_vram_usage.reserved_bo = NULL;
694 * amdgpu_device_resize_fb_bar - try to resize FB BAR
696 * @adev: amdgpu_device pointer
698 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
699 * to fail, but if any of the BARs is not accessible after the size we abort
700 * driver loading by returning -ENODEV.
702 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
704 u64 space_needed = roundup_pow_of_two(adev->mc.real_vram_size);
705 u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
706 struct pci_bus *root;
707 struct resource *res;
713 if (amdgpu_sriov_vf(adev))
716 /* Check if the root BUS has 64bit memory resources */
717 root = adev->pdev->bus;
721 pci_bus_for_each_resource(root, res, i) {
722 if (res && res->flags & IORESOURCE_MEM_64 &&
723 res->start > 0x100000000ull)
727 /* Trying to resize is pointless without a root hub window above 4GB */
731 /* Disable memory decoding while we change the BAR addresses and size */
732 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
733 pci_write_config_word(adev->pdev, PCI_COMMAND,
734 cmd & ~PCI_COMMAND_MEMORY);
736 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
737 amdgpu_device_doorbell_fini(adev);
738 if (adev->asic_type >= CHIP_BONAIRE)
739 pci_release_resource(adev->pdev, 2);
741 pci_release_resource(adev->pdev, 0);
743 r = pci_resize_resource(adev->pdev, 0, rbar_size);
745 DRM_INFO("Not enough PCI address space for a large BAR.");
746 else if (r && r != -ENOTSUPP)
747 DRM_ERROR("Problem resizing BAR0 (%d).", r);
749 pci_assign_unassigned_bus_resources(adev->pdev->bus);
751 /* When the doorbell or fb BAR isn't available we have no chance of
754 r = amdgpu_device_doorbell_init(adev);
755 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
758 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
764 * GPU helpers function.
767 * amdgpu_need_post - check if the hw need post or not
769 * @adev: amdgpu_device pointer
771 * Check if the asic has been initialized (all asics) at driver startup
772 * or post is needed if hw reset is performed.
773 * Returns true if need or false if not.
775 bool amdgpu_need_post(struct amdgpu_device *adev)
779 if (amdgpu_sriov_vf(adev))
782 if (amdgpu_passthrough(adev)) {
783 /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
784 * some old smc fw still need driver do vPost otherwise gpu hang, while
785 * those smc fw version above 22.15 doesn't have this flaw, so we force
786 * vpost executed for smc version below 22.15
788 if (adev->asic_type == CHIP_FIJI) {
791 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
792 /* force vPost if error occured */
796 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
797 if (fw_ver < 0x00160e00)
802 if (adev->has_hw_reset) {
803 adev->has_hw_reset = false;
807 /* bios scratch used on CIK+ */
808 if (adev->asic_type >= CHIP_BONAIRE)
809 return amdgpu_atombios_scratch_need_asic_init(adev);
811 /* check MEM_SIZE for older asics */
812 reg = amdgpu_asic_get_config_memsize(adev);
814 if ((reg != 0) && (reg != 0xffffffff))
821 * amdgpu_dummy_page_init - init dummy page used by the driver
823 * @adev: amdgpu_device pointer
825 * Allocate the dummy page used by the driver (all asics).
826 * This dummy page is used by the driver as a filler for gart entries
827 * when pages are taken out of the GART
828 * Returns 0 on sucess, -ENOMEM on failure.
830 int amdgpu_dummy_page_init(struct amdgpu_device *adev)
832 if (adev->dummy_page.page)
834 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
835 if (adev->dummy_page.page == NULL)
837 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
838 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
839 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
840 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
841 __free_page(adev->dummy_page.page);
842 adev->dummy_page.page = NULL;
849 * amdgpu_dummy_page_fini - free dummy page used by the driver
851 * @adev: amdgpu_device pointer
853 * Frees the dummy page used by the driver (all asics).
855 void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
857 if (adev->dummy_page.page == NULL)
859 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
860 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
861 __free_page(adev->dummy_page.page);
862 adev->dummy_page.page = NULL;
865 /* if we get transitioned to only one device, take VGA back */
867 * amdgpu_device_vga_set_decode - enable/disable vga decode
869 * @cookie: amdgpu_device pointer
870 * @state: enable/disable vga decode
872 * Enable/disable vga decode (all asics).
873 * Returns VGA resource flags.
875 static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
877 struct amdgpu_device *adev = cookie;
878 amdgpu_asic_set_vga_state(adev, state);
880 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
881 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
883 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
886 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
888 /* defines number of bits in page table versus page directory,
889 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
890 * page table and the remaining bits are in the page directory */
891 if (amdgpu_vm_block_size == -1)
894 if (amdgpu_vm_block_size < 9) {
895 dev_warn(adev->dev, "VM page table size (%d) too small\n",
896 amdgpu_vm_block_size);
897 amdgpu_vm_block_size = -1;
901 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
903 /* no need to check the default value */
904 if (amdgpu_vm_size == -1)
907 if (amdgpu_vm_size < 1) {
908 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
915 * amdgpu_device_check_arguments - validate module params
917 * @adev: amdgpu_device pointer
919 * Validates certain module parameters and updates
920 * the associated values used by the driver (all asics).
922 static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
924 if (amdgpu_sched_jobs < 4) {
925 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
927 amdgpu_sched_jobs = 4;
928 } else if (!is_power_of_2(amdgpu_sched_jobs)){
929 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
931 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
934 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
935 /* gart size must be greater or equal to 32M */
936 dev_warn(adev->dev, "gart size (%d) too small\n",
938 amdgpu_gart_size = -1;
941 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
942 /* gtt size must be greater or equal to 32M */
943 dev_warn(adev->dev, "gtt size (%d) too small\n",
945 amdgpu_gtt_size = -1;
948 /* valid range is between 4 and 9 inclusive */
949 if (amdgpu_vm_fragment_size != -1 &&
950 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
951 dev_warn(adev->dev, "valid range is between 4 and 9\n");
952 amdgpu_vm_fragment_size = -1;
955 amdgpu_device_check_vm_size(adev);
957 amdgpu_device_check_block_size(adev);
959 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
960 !is_power_of_2(amdgpu_vram_page_split))) {
961 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
962 amdgpu_vram_page_split);
963 amdgpu_vram_page_split = 1024;
966 if (amdgpu_lockup_timeout == 0) {
967 dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 10000\n");
968 amdgpu_lockup_timeout = 10000;
973 * amdgpu_switcheroo_set_state - set switcheroo state
975 * @pdev: pci dev pointer
976 * @state: vga_switcheroo state
978 * Callback for the switcheroo driver. Suspends or resumes the
979 * the asics before or after it is powered up using ACPI methods.
981 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
983 struct drm_device *dev = pci_get_drvdata(pdev);
985 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
988 if (state == VGA_SWITCHEROO_ON) {
989 pr_info("amdgpu: switched on\n");
990 /* don't suspend or resume card normally */
991 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
993 amdgpu_device_resume(dev, true, true);
995 dev->switch_power_state = DRM_SWITCH_POWER_ON;
996 drm_kms_helper_poll_enable(dev);
998 pr_info("amdgpu: switched off\n");
999 drm_kms_helper_poll_disable(dev);
1000 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1001 amdgpu_device_suspend(dev, true, true);
1002 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1007 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1009 * @pdev: pci dev pointer
1011 * Callback for the switcheroo driver. Check of the switcheroo
1012 * state can be changed.
1013 * Returns true if the state can be changed, false if not.
1015 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1017 struct drm_device *dev = pci_get_drvdata(pdev);
1020 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1021 * locking inversion with the driver load path. And the access here is
1022 * completely racy anyway. So don't bother with locking for now.
1024 return dev->open_count == 0;
1027 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1028 .set_gpu_state = amdgpu_switcheroo_set_state,
1030 .can_switch = amdgpu_switcheroo_can_switch,
1033 int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
1034 enum amd_ip_block_type block_type,
1035 enum amd_clockgating_state state)
1039 for (i = 0; i < adev->num_ip_blocks; i++) {
1040 if (!adev->ip_blocks[i].status.valid)
1042 if (adev->ip_blocks[i].version->type != block_type)
1044 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1046 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1047 (void *)adev, state);
1049 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1050 adev->ip_blocks[i].version->funcs->name, r);
1055 int amdgpu_set_powergating_state(struct amdgpu_device *adev,
1056 enum amd_ip_block_type block_type,
1057 enum amd_powergating_state state)
1061 for (i = 0; i < adev->num_ip_blocks; i++) {
1062 if (!adev->ip_blocks[i].status.valid)
1064 if (adev->ip_blocks[i].version->type != block_type)
1066 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1068 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1069 (void *)adev, state);
1071 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1072 adev->ip_blocks[i].version->funcs->name, r);
1077 void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
1081 for (i = 0; i < adev->num_ip_blocks; i++) {
1082 if (!adev->ip_blocks[i].status.valid)
1084 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1085 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1089 int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1090 enum amd_ip_block_type block_type)
1094 for (i = 0; i < adev->num_ip_blocks; i++) {
1095 if (!adev->ip_blocks[i].status.valid)
1097 if (adev->ip_blocks[i].version->type == block_type) {
1098 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1108 bool amdgpu_is_idle(struct amdgpu_device *adev,
1109 enum amd_ip_block_type block_type)
1113 for (i = 0; i < adev->num_ip_blocks; i++) {
1114 if (!adev->ip_blocks[i].status.valid)
1116 if (adev->ip_blocks[i].version->type == block_type)
1117 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1123 struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1124 enum amd_ip_block_type type)
1128 for (i = 0; i < adev->num_ip_blocks; i++)
1129 if (adev->ip_blocks[i].version->type == type)
1130 return &adev->ip_blocks[i];
1136 * amdgpu_ip_block_version_cmp
1138 * @adev: amdgpu_device pointer
1139 * @type: enum amd_ip_block_type
1140 * @major: major version
1141 * @minor: minor version
1143 * return 0 if equal or greater
1144 * return 1 if smaller or the ip_block doesn't exist
1146 int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
1147 enum amd_ip_block_type type,
1148 u32 major, u32 minor)
1150 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
1152 if (ip_block && ((ip_block->version->major > major) ||
1153 ((ip_block->version->major == major) &&
1154 (ip_block->version->minor >= minor))))
1161 * amdgpu_ip_block_add
1163 * @adev: amdgpu_device pointer
1164 * @ip_block_version: pointer to the IP to add
1166 * Adds the IP block driver information to the collection of IPs
1169 int amdgpu_ip_block_add(struct amdgpu_device *adev,
1170 const struct amdgpu_ip_block_version *ip_block_version)
1172 if (!ip_block_version)
1175 DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
1176 ip_block_version->funcs->name);
1178 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1183 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1185 adev->enable_virtual_display = false;
1187 if (amdgpu_virtual_display) {
1188 struct drm_device *ddev = adev->ddev;
1189 const char *pci_address_name = pci_name(ddev->pdev);
1190 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1192 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1193 pciaddstr_tmp = pciaddstr;
1194 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1195 pciaddname = strsep(&pciaddname_tmp, ",");
1196 if (!strcmp("all", pciaddname)
1197 || !strcmp(pci_address_name, pciaddname)) {
1201 adev->enable_virtual_display = true;
1204 res = kstrtol(pciaddname_tmp, 10,
1212 adev->mode_info.num_crtc = num_crtc;
1214 adev->mode_info.num_crtc = 1;
1220 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1221 amdgpu_virtual_display, pci_address_name,
1222 adev->enable_virtual_display, adev->mode_info.num_crtc);
1228 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1230 const char *chip_name;
1233 const struct gpu_info_firmware_header_v1_0 *hdr;
1235 adev->firmware.gpu_info_fw = NULL;
1237 switch (adev->asic_type) {
1241 case CHIP_POLARIS11:
1242 case CHIP_POLARIS10:
1243 case CHIP_POLARIS12:
1246 #ifdef CONFIG_DRM_AMDGPU_SI
1253 #ifdef CONFIG_DRM_AMDGPU_CIK
1263 chip_name = "vega10";
1266 chip_name = "raven";
1270 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1271 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1274 "Failed to load gpu_info firmware \"%s\"\n",
1278 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1281 "Failed to validate gpu_info firmware \"%s\"\n",
1286 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1287 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1289 switch (hdr->version_major) {
1292 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1293 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1294 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1296 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1297 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1298 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1299 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1300 adev->gfx.config.max_texture_channel_caches =
1301 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1302 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1303 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1304 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1305 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1306 adev->gfx.config.double_offchip_lds_buf =
1307 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1308 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1309 adev->gfx.cu_info.max_waves_per_simd =
1310 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1311 adev->gfx.cu_info.max_scratch_slots_per_cu =
1312 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1313 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1318 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1326 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1330 amdgpu_device_enable_virtual_display(adev);
1332 switch (adev->asic_type) {
1336 case CHIP_POLARIS11:
1337 case CHIP_POLARIS10:
1338 case CHIP_POLARIS12:
1341 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1342 adev->family = AMDGPU_FAMILY_CZ;
1344 adev->family = AMDGPU_FAMILY_VI;
1346 r = vi_set_ip_blocks(adev);
1350 #ifdef CONFIG_DRM_AMDGPU_SI
1356 adev->family = AMDGPU_FAMILY_SI;
1357 r = si_set_ip_blocks(adev);
1362 #ifdef CONFIG_DRM_AMDGPU_CIK
1368 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1369 adev->family = AMDGPU_FAMILY_CI;
1371 adev->family = AMDGPU_FAMILY_KV;
1373 r = cik_set_ip_blocks(adev);
1380 if (adev->asic_type == CHIP_RAVEN)
1381 adev->family = AMDGPU_FAMILY_RV;
1383 adev->family = AMDGPU_FAMILY_AI;
1385 r = soc15_set_ip_blocks(adev);
1390 /* FIXME: not supported yet */
1394 r = amdgpu_device_parse_gpu_info_fw(adev);
1398 amdgpu_amdkfd_device_probe(adev);
1400 if (amdgpu_sriov_vf(adev)) {
1401 r = amdgpu_virt_request_full_gpu(adev, true);
1406 for (i = 0; i < adev->num_ip_blocks; i++) {
1407 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1408 DRM_ERROR("disabled ip block: %d <%s>\n",
1409 i, adev->ip_blocks[i].version->funcs->name);
1410 adev->ip_blocks[i].status.valid = false;
1412 if (adev->ip_blocks[i].version->funcs->early_init) {
1413 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1415 adev->ip_blocks[i].status.valid = false;
1417 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1418 adev->ip_blocks[i].version->funcs->name, r);
1421 adev->ip_blocks[i].status.valid = true;
1424 adev->ip_blocks[i].status.valid = true;
1429 adev->cg_flags &= amdgpu_cg_mask;
1430 adev->pg_flags &= amdgpu_pg_mask;
1435 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1439 for (i = 0; i < adev->num_ip_blocks; i++) {
1440 if (!adev->ip_blocks[i].status.valid)
1442 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1444 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1445 adev->ip_blocks[i].version->funcs->name, r);
1448 adev->ip_blocks[i].status.sw = true;
1449 /* need to do gmc hw init early so we can allocate gpu mem */
1450 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1451 r = amdgpu_device_vram_scratch_init(adev);
1453 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1456 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1458 DRM_ERROR("hw_init %d failed %d\n", i, r);
1461 r = amdgpu_device_wb_init(adev);
1463 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
1466 adev->ip_blocks[i].status.hw = true;
1468 /* right after GMC hw init, we create CSA */
1469 if (amdgpu_sriov_vf(adev)) {
1470 r = amdgpu_allocate_static_csa(adev);
1472 DRM_ERROR("allocate CSA failed %d\n", r);
1479 for (i = 0; i < adev->num_ip_blocks; i++) {
1480 if (!adev->ip_blocks[i].status.sw)
1482 /* gmc hw init is done early */
1483 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
1485 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1487 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1488 adev->ip_blocks[i].version->funcs->name, r);
1491 adev->ip_blocks[i].status.hw = true;
1494 amdgpu_amdkfd_device_init(adev);
1496 if (amdgpu_sriov_vf(adev))
1497 amdgpu_virt_release_full_gpu(adev, true);
1502 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
1504 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1507 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
1509 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1510 AMDGPU_RESET_MAGIC_NUM);
1513 static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
1517 for (i = 0; i < adev->num_ip_blocks; i++) {
1518 if (!adev->ip_blocks[i].status.valid)
1520 /* skip CG for VCE/UVD, it's handled specially */
1521 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1522 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1523 /* enable clockgating to save power */
1524 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1527 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1528 adev->ip_blocks[i].version->funcs->name, r);
1536 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
1540 for (i = 0; i < adev->num_ip_blocks; i++) {
1541 if (!adev->ip_blocks[i].status.valid)
1543 if (adev->ip_blocks[i].version->funcs->late_init) {
1544 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1546 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1547 adev->ip_blocks[i].version->funcs->name, r);
1550 adev->ip_blocks[i].status.late_initialized = true;
1554 mod_delayed_work(system_wq, &adev->late_init_work,
1555 msecs_to_jiffies(AMDGPU_RESUME_MS));
1557 amdgpu_device_fill_reset_magic(adev);
1562 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
1566 amdgpu_amdkfd_device_fini(adev);
1567 /* need to disable SMC first */
1568 for (i = 0; i < adev->num_ip_blocks; i++) {
1569 if (!adev->ip_blocks[i].status.hw)
1571 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
1572 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1573 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1574 AMD_CG_STATE_UNGATE);
1576 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1577 adev->ip_blocks[i].version->funcs->name, r);
1580 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1581 /* XXX handle errors */
1583 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1584 adev->ip_blocks[i].version->funcs->name, r);
1586 adev->ip_blocks[i].status.hw = false;
1591 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1592 if (!adev->ip_blocks[i].status.hw)
1594 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1595 amdgpu_free_static_csa(adev);
1596 amdgpu_device_wb_fini(adev);
1597 amdgpu_device_vram_scratch_fini(adev);
1600 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1601 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1602 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1603 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1604 AMD_CG_STATE_UNGATE);
1606 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1607 adev->ip_blocks[i].version->funcs->name, r);
1612 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1613 /* XXX handle errors */
1615 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1616 adev->ip_blocks[i].version->funcs->name, r);
1619 adev->ip_blocks[i].status.hw = false;
1622 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1623 if (!adev->ip_blocks[i].status.sw)
1625 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
1626 /* XXX handle errors */
1628 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1629 adev->ip_blocks[i].version->funcs->name, r);
1631 adev->ip_blocks[i].status.sw = false;
1632 adev->ip_blocks[i].status.valid = false;
1635 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1636 if (!adev->ip_blocks[i].status.late_initialized)
1638 if (adev->ip_blocks[i].version->funcs->late_fini)
1639 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1640 adev->ip_blocks[i].status.late_initialized = false;
1643 if (amdgpu_sriov_vf(adev))
1644 if (amdgpu_virt_release_full_gpu(adev, false))
1645 DRM_ERROR("failed to release exclusive mode on fini\n");
1650 static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
1652 struct amdgpu_device *adev =
1653 container_of(work, struct amdgpu_device, late_init_work.work);
1654 amdgpu_device_ip_late_set_cg_state(adev);
1657 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
1661 if (amdgpu_sriov_vf(adev))
1662 amdgpu_virt_request_full_gpu(adev, false);
1664 /* ungate SMC block first */
1665 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1666 AMD_CG_STATE_UNGATE);
1668 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
1671 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1672 if (!adev->ip_blocks[i].status.valid)
1674 /* ungate blocks so that suspend can properly shut them down */
1675 if (i != AMD_IP_BLOCK_TYPE_SMC) {
1676 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1677 AMD_CG_STATE_UNGATE);
1679 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1680 adev->ip_blocks[i].version->funcs->name, r);
1683 /* XXX handle errors */
1684 r = adev->ip_blocks[i].version->funcs->suspend(adev);
1685 /* XXX handle errors */
1687 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1688 adev->ip_blocks[i].version->funcs->name, r);
1692 if (amdgpu_sriov_vf(adev))
1693 amdgpu_virt_release_full_gpu(adev, false);
1698 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
1702 static enum amd_ip_block_type ip_order[] = {
1703 AMD_IP_BLOCK_TYPE_GMC,
1704 AMD_IP_BLOCK_TYPE_COMMON,
1705 AMD_IP_BLOCK_TYPE_IH,
1708 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1710 struct amdgpu_ip_block *block;
1712 for (j = 0; j < adev->num_ip_blocks; j++) {
1713 block = &adev->ip_blocks[j];
1715 if (block->version->type != ip_order[i] ||
1716 !block->status.valid)
1719 r = block->version->funcs->hw_init(adev);
1720 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
1727 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
1731 static enum amd_ip_block_type ip_order[] = {
1732 AMD_IP_BLOCK_TYPE_SMC,
1733 AMD_IP_BLOCK_TYPE_PSP,
1734 AMD_IP_BLOCK_TYPE_DCE,
1735 AMD_IP_BLOCK_TYPE_GFX,
1736 AMD_IP_BLOCK_TYPE_SDMA,
1737 AMD_IP_BLOCK_TYPE_UVD,
1738 AMD_IP_BLOCK_TYPE_VCE
1741 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1743 struct amdgpu_ip_block *block;
1745 for (j = 0; j < adev->num_ip_blocks; j++) {
1746 block = &adev->ip_blocks[j];
1748 if (block->version->type != ip_order[i] ||
1749 !block->status.valid)
1752 r = block->version->funcs->hw_init(adev);
1753 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
1760 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
1764 for (i = 0; i < adev->num_ip_blocks; i++) {
1765 if (!adev->ip_blocks[i].status.valid)
1767 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1768 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1769 adev->ip_blocks[i].version->type ==
1770 AMD_IP_BLOCK_TYPE_IH) {
1771 r = adev->ip_blocks[i].version->funcs->resume(adev);
1773 DRM_ERROR("resume of IP block <%s> failed %d\n",
1774 adev->ip_blocks[i].version->funcs->name, r);
1783 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
1787 for (i = 0; i < adev->num_ip_blocks; i++) {
1788 if (!adev->ip_blocks[i].status.valid)
1790 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1791 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1792 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
1794 r = adev->ip_blocks[i].version->funcs->resume(adev);
1796 DRM_ERROR("resume of IP block <%s> failed %d\n",
1797 adev->ip_blocks[i].version->funcs->name, r);
1805 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
1809 r = amdgpu_device_ip_resume_phase1(adev);
1812 r = amdgpu_device_ip_resume_phase2(adev);
1817 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
1819 if (amdgpu_sriov_vf(adev)) {
1820 if (adev->is_atom_fw) {
1821 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1822 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1824 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1825 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1828 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
1829 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
1833 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
1835 switch (asic_type) {
1836 #if defined(CONFIG_DRM_AMD_DC)
1842 case CHIP_POLARIS11:
1843 case CHIP_POLARIS10:
1844 case CHIP_POLARIS12:
1847 #if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
1848 return amdgpu_dc != 0;
1852 return amdgpu_dc > 0;
1854 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1857 return amdgpu_dc != 0;
1865 * amdgpu_device_has_dc_support - check if dc is supported
1867 * @adev: amdgpu_device_pointer
1869 * Returns true for supported, false for not supported
1871 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
1873 if (amdgpu_sriov_vf(adev))
1876 return amdgpu_device_asic_has_dc_support(adev->asic_type);
1880 * amdgpu_device_init - initialize the driver
1882 * @adev: amdgpu_device pointer
1883 * @pdev: drm dev pointer
1884 * @pdev: pci dev pointer
1885 * @flags: driver flags
1887 * Initializes the driver info and hw (all asics).
1888 * Returns 0 for success or an error on failure.
1889 * Called at driver startup.
1891 int amdgpu_device_init(struct amdgpu_device *adev,
1892 struct drm_device *ddev,
1893 struct pci_dev *pdev,
1897 bool runtime = false;
1900 adev->shutdown = false;
1901 adev->dev = &pdev->dev;
1904 adev->flags = flags;
1905 adev->asic_type = flags & AMD_ASIC_MASK;
1906 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1907 adev->mc.gart_size = 512 * 1024 * 1024;
1908 adev->accel_working = false;
1909 adev->num_rings = 0;
1910 adev->mman.buffer_funcs = NULL;
1911 adev->mman.buffer_funcs_ring = NULL;
1912 adev->vm_manager.vm_pte_funcs = NULL;
1913 adev->vm_manager.vm_pte_num_rings = 0;
1914 adev->gart.gart_funcs = NULL;
1915 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
1916 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1918 adev->smc_rreg = &amdgpu_invalid_rreg;
1919 adev->smc_wreg = &amdgpu_invalid_wreg;
1920 adev->pcie_rreg = &amdgpu_invalid_rreg;
1921 adev->pcie_wreg = &amdgpu_invalid_wreg;
1922 adev->pciep_rreg = &amdgpu_invalid_rreg;
1923 adev->pciep_wreg = &amdgpu_invalid_wreg;
1924 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1925 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1926 adev->didt_rreg = &amdgpu_invalid_rreg;
1927 adev->didt_wreg = &amdgpu_invalid_wreg;
1928 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1929 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
1930 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1931 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1933 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1934 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1935 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
1937 /* mutex initialization are all done here so we
1938 * can recall function without having locking issues */
1939 atomic_set(&adev->irq.ih.lock, 0);
1940 mutex_init(&adev->firmware.mutex);
1941 mutex_init(&adev->pm.mutex);
1942 mutex_init(&adev->gfx.gpu_clock_mutex);
1943 mutex_init(&adev->srbm_mutex);
1944 mutex_init(&adev->gfx.pipe_reserve_mutex);
1945 mutex_init(&adev->grbm_idx_mutex);
1946 mutex_init(&adev->mn_lock);
1947 mutex_init(&adev->virt.vf_errors.lock);
1948 hash_init(adev->mn_hash);
1949 mutex_init(&adev->lock_reset);
1951 amdgpu_device_check_arguments(adev);
1953 spin_lock_init(&adev->mmio_idx_lock);
1954 spin_lock_init(&adev->smc_idx_lock);
1955 spin_lock_init(&adev->pcie_idx_lock);
1956 spin_lock_init(&adev->uvd_ctx_idx_lock);
1957 spin_lock_init(&adev->didt_idx_lock);
1958 spin_lock_init(&adev->gc_cac_idx_lock);
1959 spin_lock_init(&adev->se_cac_idx_lock);
1960 spin_lock_init(&adev->audio_endpt_idx_lock);
1961 spin_lock_init(&adev->mm_stats.lock);
1963 INIT_LIST_HEAD(&adev->shadow_list);
1964 mutex_init(&adev->shadow_list_lock);
1966 INIT_LIST_HEAD(&adev->ring_lru_list);
1967 spin_lock_init(&adev->ring_lru_list_lock);
1969 INIT_DELAYED_WORK(&adev->late_init_work,
1970 amdgpu_device_ip_late_init_func_handler);
1972 /* Registers mapping */
1973 /* TODO: block userspace mapping of io register */
1974 if (adev->asic_type >= CHIP_BONAIRE) {
1975 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1976 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1978 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1979 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1982 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1983 if (adev->rmmio == NULL) {
1986 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1987 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1989 /* doorbell bar mapping */
1990 amdgpu_device_doorbell_init(adev);
1992 /* io port mapping */
1993 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1994 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1995 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1996 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2000 if (adev->rio_mem == NULL)
2001 DRM_INFO("PCI I/O BAR is not found.\n");
2003 /* early init functions */
2004 r = amdgpu_device_ip_early_init(adev);
2008 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2009 /* this will fail for cards that aren't VGA class devices, just
2011 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
2013 if (amdgpu_runtime_pm == 1)
2015 if (amdgpu_device_is_px(ddev))
2017 if (!pci_is_thunderbolt_attached(adev->pdev))
2018 vga_switcheroo_register_client(adev->pdev,
2019 &amdgpu_switcheroo_ops, runtime);
2021 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2024 if (!amdgpu_get_bios(adev)) {
2029 r = amdgpu_atombios_init(adev);
2031 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2032 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2036 /* detect if we are with an SRIOV vbios */
2037 amdgpu_device_detect_sriov_bios(adev);
2039 /* Post card if necessary */
2040 if (amdgpu_need_post(adev)) {
2042 dev_err(adev->dev, "no vBIOS found\n");
2046 DRM_INFO("GPU posting now...\n");
2047 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2049 dev_err(adev->dev, "gpu post error!\n");
2054 if (adev->is_atom_fw) {
2055 /* Initialize clocks */
2056 r = amdgpu_atomfirmware_get_clock_info(adev);
2058 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
2059 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2063 /* Initialize clocks */
2064 r = amdgpu_atombios_get_clock_info(adev);
2066 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
2067 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2070 /* init i2c buses */
2071 if (!amdgpu_device_has_dc_support(adev))
2072 amdgpu_atombios_i2c_init(adev);
2076 r = amdgpu_fence_driver_init(adev);
2078 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
2079 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
2083 /* init the mode config */
2084 drm_mode_config_init(adev->ddev);
2086 r = amdgpu_device_ip_init(adev);
2088 /* failed in exclusive mode due to timeout */
2089 if (amdgpu_sriov_vf(adev) &&
2090 !amdgpu_sriov_runtime(adev) &&
2091 amdgpu_virt_mmio_blocked(adev) &&
2092 !amdgpu_virt_wait_reset(adev)) {
2093 dev_err(adev->dev, "VF exclusive mode timeout\n");
2094 /* Don't send request since VF is inactive. */
2095 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2096 adev->virt.ops = NULL;
2100 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
2101 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
2102 amdgpu_device_ip_fini(adev);
2106 adev->accel_working = true;
2108 amdgpu_vm_check_compute_bug(adev);
2110 /* Initialize the buffer migration limit. */
2111 if (amdgpu_moverate >= 0)
2112 max_MBps = amdgpu_moverate;
2114 max_MBps = 8; /* Allow 8 MB/s. */
2115 /* Get a log2 for easy divisions. */
2116 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2118 r = amdgpu_ib_pool_init(adev);
2120 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2121 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2125 r = amdgpu_ib_ring_tests(adev);
2127 DRM_ERROR("ib ring test failed (%d).\n", r);
2129 if (amdgpu_sriov_vf(adev))
2130 amdgpu_virt_init_data_exchange(adev);
2132 amdgpu_fbdev_init(adev);
2134 r = amdgpu_pm_sysfs_init(adev);
2136 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2138 r = amdgpu_debugfs_gem_init(adev);
2140 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
2142 r = amdgpu_debugfs_regs_init(adev);
2144 DRM_ERROR("registering register debugfs failed (%d).\n", r);
2146 r = amdgpu_debugfs_firmware_init(adev);
2148 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
2150 r = amdgpu_debugfs_init(adev);
2152 DRM_ERROR("Creating debugfs files failed (%d).\n", r);
2154 if ((amdgpu_testing & 1)) {
2155 if (adev->accel_working)
2156 amdgpu_test_moves(adev);
2158 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2160 if (amdgpu_benchmarking) {
2161 if (adev->accel_working)
2162 amdgpu_benchmark(adev, amdgpu_benchmarking);
2164 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2167 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2168 * explicit gating rather than handling it automatically.
2170 r = amdgpu_device_ip_late_init(adev);
2172 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
2173 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
2180 amdgpu_vf_error_trans_all(adev);
2182 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2188 * amdgpu_device_fini - tear down the driver
2190 * @adev: amdgpu_device pointer
2192 * Tear down the driver info (all asics).
2193 * Called at driver shutdown.
2195 void amdgpu_device_fini(struct amdgpu_device *adev)
2199 DRM_INFO("amdgpu: finishing device.\n");
2200 adev->shutdown = true;
2201 if (adev->mode_info.mode_config_initialized)
2202 drm_crtc_force_disable_all(adev->ddev);
2204 amdgpu_ib_pool_fini(adev);
2205 amdgpu_fence_driver_fini(adev);
2206 amdgpu_fbdev_fini(adev);
2207 r = amdgpu_device_ip_fini(adev);
2208 if (adev->firmware.gpu_info_fw) {
2209 release_firmware(adev->firmware.gpu_info_fw);
2210 adev->firmware.gpu_info_fw = NULL;
2212 adev->accel_working = false;
2213 cancel_delayed_work_sync(&adev->late_init_work);
2214 /* free i2c buses */
2215 if (!amdgpu_device_has_dc_support(adev))
2216 amdgpu_i2c_fini(adev);
2217 amdgpu_atombios_fini(adev);
2220 if (!pci_is_thunderbolt_attached(adev->pdev))
2221 vga_switcheroo_unregister_client(adev->pdev);
2222 if (adev->flags & AMD_IS_PX)
2223 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2224 vga_client_register(adev->pdev, NULL, NULL, NULL);
2226 pci_iounmap(adev->pdev, adev->rio_mem);
2227 adev->rio_mem = NULL;
2228 iounmap(adev->rmmio);
2230 amdgpu_device_doorbell_fini(adev);
2231 amdgpu_pm_sysfs_fini(adev);
2232 amdgpu_debugfs_regs_cleanup(adev);
2240 * amdgpu_device_suspend - initiate device suspend
2242 * @pdev: drm dev pointer
2243 * @state: suspend state
2245 * Puts the hw in the suspend state (all asics).
2246 * Returns 0 for success or an error on failure.
2247 * Called at driver suspend.
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL)
		return -ENODEV;

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	if (!amdgpu_device_has_dc_support(adev)) {
		/* turn off display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
		}
		drm_modeset_unlock_all(dev);
	}

	amdgpu_amdkfd_suspend(adev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL)
			continue;

		robj = gem_to_amdgpu_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, true);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_device_ip_suspend(adev);

	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	} else {
		r = amdgpu_asic_reset(adev);
		if (r)
			DRM_ERROR("amdgpu asic reset failed\n");
	}

	if (fbcon) {
		console_lock();
		amdgpu_fbdev_set_suspend(adev, 1);
		console_unlock();
	}
	return 0;
}
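/*
 * Call-site sketch (for orientation; the actual wiring lives in the PM ops
 * in amdgpu_drv.c): the system suspend callback passes suspend=true so the
 * device drops to D3hot, while the hibernate "freeze" callback passes
 * suspend=false so the ASIC is reset to a known state instead, roughly:
 *
 *	amdgpu_device_suspend(drm_dev, true, true);	// suspend
 *	amdgpu_device_suspend(drm_dev, false, true);	// freeze
 */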
/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true if the PCI device needs to be re-enabled and brought to D0
 * @fbcon: true if the fbdev console should be notified of the resume
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	int r = 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon)
		console_lock();

	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r)
			goto unlock;
	}

	/* post card */
	if (amdgpu_need_post(adev)) {
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r)
			DRM_ERROR("amdgpu asic init failed\n");
	}

	r = amdgpu_device_ip_resume(adev);
	if (r) {
		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
		goto unlock;
	}
	amdgpu_fence_driver_resume(adev);

	if (resume) {
		r = amdgpu_ib_ring_tests(adev);
		if (r)
			DRM_ERROR("ib ring test failed (%d).\n", r);
	}

	r = amdgpu_device_ip_late_init(adev);
	if (r)
		goto unlock;

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj,
						  AMDGPU_GEM_DOMAIN_VRAM,
						  &amdgpu_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}
	r = amdgpu_amdkfd_resume(adev);
	if (r)
		goto unlock;

	/* blat the mode back in */
	if (fbcon) {
		if (!amdgpu_device_has_dc_support(adev)) {
			drm_helper_resume_force_mode(dev);

			/* turn on display hw */
			drm_modeset_lock_all(dev);
			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
			}
			drm_modeset_unlock_all(dev);
		} else {
			/*
			 * There is no equivalent atomic helper to turn on
			 * display, so we defined our own function for this,
			 * once suspend resume is supported by the atomic
			 * framework this will be reworked
			 */
			amdgpu_dm_display_resume(adev);
		}
	}

	drm_kms_helper_poll_enable(dev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	if (!amdgpu_device_has_dc_support(adev))
		drm_helper_hpd_irq_event(dev);
	else
		drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif
	if (fbcon)
		amdgpu_fbdev_set_suspend(adev, 0);

unlock:
	if (fbcon)
		console_unlock();

	return r;
}
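/*
 * Counterpart sketch to the suspend note above: the PM ops funnel system
 * resume and the hibernate thaw/restore paths into this function, passing
 * resume=true only when the PCI device was actually powered down and needs
 * to be re-enabled.
 */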
static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	/* SR-IOV VFs skip the per-block check and always report a hang
	 * so that recovery can proceed.
	 */
	if (amdgpu_sriov_vf(adev))
		return true;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}
static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}
	return 0;
}
static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) {
			if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some blocks need a full reset!\n");
				return true;
			}
		}
	}
	return false;
}
static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}
	return 0;
}
static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
		if (r)
			return r;
	}
	return 0;
}
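/*
 * Taken together, the helpers above implement the soft-reset sequence used
 * by amdgpu_device_reset() below: check_soft_reset() marks hung IP blocks,
 * pre_soft_reset() quiesces them, soft_reset() resets only those blocks,
 * and post_soft_reset() re-enables them. Blocks that cannot be recovered
 * this way (GMC/SMC/ACP/DCE/PSP) escalate to a full ASIC reset instead.
 */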
bool amdgpu_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	return amdgpu_gpu_recovery;
}
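/*
 * Rationale (as reflected in the checks above): shadow backups exist to
 * restore VRAM contents after a reset. APUs keep their buffers in system
 * memory, so there is no dedicated VRAM to lose and no backup is needed;
 * otherwise the amdgpu_gpu_recovery module parameter decides.
 */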
static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
						  struct amdgpu_ring *ring,
						  struct amdgpu_bo *bo,
						  struct dma_fence **fence)
{
	uint32_t domain;
	int r;

	if (!bo->shadow)
		return 0;

	r = amdgpu_bo_reserve(bo, true);
	if (r)
		return r;
	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	/* if bo has been evicted, then no need to recover */
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		r = amdgpu_bo_validate(bo->shadow);
		if (r) {
			DRM_ERROR("bo validate failed!\n");
			goto err;
		}

		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
						  NULL, fence, true);
		if (r) {
			DRM_ERROR("recover page table failed!\n");
			goto err;
		}
	}
err:
	amdgpu_bo_unreserve(bo);
	return r;
}
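/*
 * Background for the helper above: BOs created with AMDGPU_GEM_CREATE_SHADOW
 * (page tables in particular) carry a GTT-resident shadow copy. After a
 * reset that clobbered VRAM, the shadow is copied back over the ring passed
 * in here, and *fence signals once that copy has landed so the caller can
 * order and wait on the recovery copies.
 */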
/**
 * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
 *
 * @adev: amdgpu device pointer
 * @reset_flags: output param tells caller the reset result
 *
 * Attempt a soft reset first, falling back to a full reset and ASIC
 * re-initialization if that fails.
 * Returns 0 on success, a negative error code otherwise.
 */
static int amdgpu_device_reset(struct amdgpu_device *adev,
			       uint64_t *reset_flags)
{
	bool need_full_reset, vram_lost = false;
	int r;

	need_full_reset = amdgpu_device_ip_need_full_reset(adev);

	if (!need_full_reset) {
		amdgpu_device_ip_pre_soft_reset(adev);
		r = amdgpu_device_ip_soft_reset(adev);
		amdgpu_device_ip_post_soft_reset(adev);
		/* re-check: the soft reset must have cleared every hang */
		if (r || amdgpu_device_ip_check_soft_reset(adev)) {
			DRM_INFO("soft reset failed, will fallback to full reset!\n");
			need_full_reset = true;
		}
	}

	if (need_full_reset) {
		r = amdgpu_device_ip_suspend(adev);
retry:
		r = amdgpu_asic_reset(adev);
		/* post card */
		amdgpu_atom_asic_init(adev->mode_info.atom_context);

		if (!r) {
			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
			r = amdgpu_device_ip_resume_phase1(adev);
			if (r)
				goto out;
			vram_lost = amdgpu_device_check_vram_lost(adev);
			if (vram_lost) {
				DRM_ERROR("VRAM is lost!\n");
				atomic_inc(&adev->vram_lost_counter);
			}
			r = amdgpu_gtt_mgr_recover(
				&adev->mman.bdev.man[TTM_PL_TT]);
			if (r)
				goto out;
			r = amdgpu_device_ip_resume_phase2(adev);
			if (r)
				goto out;
			if (vram_lost)
				amdgpu_device_fill_reset_magic(adev);
		}
	}
out:
	if (!r) {
		amdgpu_irq_gpu_reset_resume_helper(adev);
		r = amdgpu_ib_ring_tests(adev);
		if (r) {
			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
			r = amdgpu_device_ip_suspend(adev);
			need_full_reset = true;
			goto retry;
		}
	}

	if (reset_flags) {
		if (vram_lost)
			(*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
		if (need_full_reset)
			(*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
	}

	return r;
}
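/*
 * Flow summary for amdgpu_device_reset(): soft-reset only the hung IP
 * blocks when possible; otherwise suspend the IPs, reset the whole ASIC,
 * re-post it via atombios, and resume in two phases. If the post-reset IB
 * tests still fail, the full reset is retried.
 */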
/**
 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
 *
 * @adev: amdgpu device pointer
 * @reset_flags: output param tells caller the reset result
 * @from_hypervisor: true if the reset was requested by the hypervisor
 *
 * Perform a VF FLR and reinitialize the ASIC.
 * Returns 0 on success, a negative error code otherwise.
 */
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
				     uint64_t *reset_flags,
				     bool from_hypervisor)
{
	int r;

	if (from_hypervisor)
		r = amdgpu_virt_request_full_gpu(adev, true);
	else
		r = amdgpu_virt_reset_gpu(adev);
	if (r)
		return r;

	/* Resume IP prior to SMC */
	r = amdgpu_device_ip_reinit_early_sriov(adev);
	if (r)
		goto error;

	/* we need recover gart prior to run SMC/CP/SDMA resume */
	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);

	/* now we are okay to resume SMC/CP/SDMA */
	r = amdgpu_device_ip_reinit_late_sriov(adev);
	if (r)
		goto error;

	amdgpu_irq_gpu_reset_resume_helper(adev);
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);

error:
	/* release full control of GPU after ib test */
	amdgpu_virt_release_full_gpu(adev, true);

	if (reset_flags) {
		if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
			(*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
			atomic_inc(&adev->vram_lost_counter);
		}

		/* VF FLR or hotlink reset is always full-reset */
		(*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
	}

	return r;
}
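/*
 * Note on the VRAM-lost flag above: whether a VF FLR preserves VRAM is a
 * property the host GIM driver advertises (AMDGIM_FEATURE_GIM_FLR_VRAMLOST),
 * so the guest learns about VRAM loss from that feature bit rather than by
 * probing memory itself.
 */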
/**
 * amdgpu_gpu_recover - reset the ASIC and recover the scheduler
 *
 * @adev: amdgpu device pointer
 * @job: the job which triggered the hang, or NULL if none
 * @force: forces reset regardless of amdgpu_gpu_recovery
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool force)
{
	struct drm_atomic_state *state = NULL;
	uint64_t reset_flags = 0;
	int i, r, resched;

	if (!amdgpu_device_ip_check_soft_reset(adev)) {
		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
		return 0;
	}

	if (!force && (amdgpu_gpu_recovery == 0 ||
		       (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
		DRM_INFO("GPU recovery disabled.\n");
		return 0;
	}

	dev_info(adev->dev, "GPU reset begin!\n");

	mutex_lock(&adev->lock_reset);
	atomic_inc(&adev->gpu_reset_counter);
	adev->in_gpu_reset = 1;
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
	/* store modesetting */
	if (amdgpu_device_has_dc_support(adev))
		state = drm_atomic_helper_suspend(adev->ddev);

	/* block scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		/* only focus on the ring that hit the timeout if @job is not NULL */
		if (job && job->ring->idx != i)
			continue;

		kthread_park(ring->sched.thread);
		drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion(ring);
	}
	if (amdgpu_sriov_vf(adev))
		r = amdgpu_device_reset_sriov(adev, &reset_flags, job ? false : true);
	else
		r = amdgpu_device_reset(adev, &reset_flags);

	if (!r) {
		if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
		    (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) {
			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
			struct amdgpu_bo *bo, *tmp;
			struct dma_fence *fence = NULL, *next = NULL;

			DRM_INFO("recover vram bo from shadow\n");
			mutex_lock(&adev->shadow_list_lock);
			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
				next = NULL;
				amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
				if (fence) {
					r = dma_fence_wait(fence, false);
					if (r) {
						WARN(r, "recovery from shadow isn't completed\n");
						break;
					}
				}

				dma_fence_put(fence);
				fence = next;
			}
			mutex_unlock(&adev->shadow_list_lock);
			if (fence) {
				r = dma_fence_wait(fence, false);
				if (r)
					WARN(r, "recovery from shadow isn't completed\n");
			}
			dma_fence_put(fence);
		}
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* only focus on the ring that hit the timeout if @job is not NULL */
			if (job && job->ring->idx != i)
				continue;

			drm_sched_job_recovery(&ring->sched);
			kthread_unpark(ring->sched.thread);
		}
	} else {
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* only focus on the ring that hit the timeout if @job is not NULL */
			if (job && job->ring->idx != i)
				continue;

			kthread_unpark(adev->rings[i]->sched.thread);
		}
	}
	if (amdgpu_device_has_dc_support(adev)) {
		if (drm_atomic_helper_resume(adev->ddev, state))
			dev_info(adev->dev, "drm resume failed:%d\n", r);
		amdgpu_dm_display_resume(adev);
	} else {
		drm_helper_resume_force_mode(adev->ddev);
	}

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);

	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
	} else {
		dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
	}

	amdgpu_vf_error_trans_all(adev);
	adev->in_gpu_reset = 0;
	mutex_unlock(&adev->lock_reset);
	return r;
}
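/*
 * Caller sketch (illustrative; the real call sites live elsewhere in the
 * driver): the GPU scheduler's job-timeout handler passes the hung job so
 * only the affected ring is recovered, while the SR-IOV FLR interrupt
 * handler passes job = NULL to recover every ring, roughly:
 *
 *	amdgpu_gpu_recover(adev, job, false);	// job timeout
 *	amdgpu_gpu_recover(adev, NULL, true);	// hypervisor-initiated FLR
 */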
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
	u32 mask;
	int ret;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask == 0) {
		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
		if (!ret) {
			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

			if (mask & DRM_PCIE_SPEED_25)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
			if (mask & DRM_PCIE_SPEED_50)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
			if (mask & DRM_PCIE_SPEED_80)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
		} else {
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
		if (!ret) {
			switch (mask) {
			case 32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		} else {
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		}
	}
}
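/*
 * Reading the masks above: each CAIL_PCIE_LINK_WIDTH_SUPPORT_Xn bit means
 * "a link of width xN is usable", so a device whose maximum link width is
 * x8 ends up with X8|X4|X2|X1 set: every width up to and including the
 * maximum the link can train to.
 */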