/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"LAST",
};
bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}
/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_rreg(adev, reg);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_wreg(adev, reg, v);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
}
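/*
 * Illustrative note (not part of the driver logic): registers whose byte
 * offset (reg * 4) falls inside the mapped BAR are accessed directly, and
 * anything beyond it goes through the MM_INDEX/MM_DATA indirection under
 * mmio_idx_lock.  Callers never pick the path by hand; they go through the
 * RREG32()/WREG32() wrappers, e.g.:
 *
 *	u32 tmp = RREG32(mmCONFIG_MEMSIZE);
 *	WREG32(mmMM_INDEX, 0);
 *
 * and the helpers above choose the direct or indexed access.
 */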
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}
}
/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}
/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}
/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
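/*
 * Usage note (illustrative): ring code reaches these helpers through the
 * RDOORBELL32()/WDOORBELL32()-style wrappers rather than calling them
 * directly, e.g. after advancing a ring's write pointer:
 *
 *	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 *
 * which kicks the engine to fetch the newly written commands.
 */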
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}
/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}
/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}
/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}
static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, &adev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->vram_scratch.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
			   (void **)&adev->vram_scratch.ptr);
	if (r)
		amdgpu_bo_unpin(adev->vram_scratch.robj);
	amdgpu_bo_unreserve(adev->vram_scratch.robj);

	return r;
}
static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->vram_scratch.robj);
		amdgpu_bo_unpin(adev->vram_scratch.robj);
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
	}
	amdgpu_bo_unref(&adev->vram_scratch.robj);
}
/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
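/*
 * Illustrative sketch (hypothetical table, not real golden settings):
 * entries come in {register, AND mask, OR mask} triples, so a caller
 * would look like
 *
 *	static const u32 example_golden_settings[] = {
 *		mmMM_INDEX, 0xffffffff, 0x00000000,
 *	};
 *	amdgpu_program_register_sequence(adev, example_golden_settings,
 *					 ARRAY_SIZE(example_golden_settings));
 *
 * An AND mask of 0xffffffff means "replace outright with the OR mask";
 * a narrower AND mask read-modify-writes only the selected bits.
 */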
void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}
/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
	if (adev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);

	return 0;
}
/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}
/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture.  amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}
/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */
/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}
/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
	}

	return 0;
}
/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}
/**
 * amdgpu_wb_get_64bit - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a 64-bit wb slot (two consecutive, 8-byte aligned 32-bit
 * entries) for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
				adev->wb.num_wb, 0, 2, 7, 0);

	if ((offset + 1) < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		__set_bit(offset + 1, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}
/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
/**
 * amdgpu_wb_free_64bit - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a 64-bit wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
{
	if ((wb + 1) < adev->wb.num_wb) {
		__clear_bit(wb, adev->wb.used);
		__clear_bit(wb + 1, adev->wb.used);
	}
}
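/*
 * Usage sketch (illustrative, not part of the driver): a ring that needs
 * a fence or rptr shadow grabs a slot, derives the CPU and GPU views of
 * it, and releases it on teardown:
 *
 *	u32 wb;
 *	if (amdgpu_wb_get(adev, &wb) == 0) {
 *		volatile u32 *cpu_addr = &adev->wb.wb[wb];
 *		u64 gpu_addr = adev->wb.gpu_addr + wb * 4;
 *		... hand gpu_addr to the engine, poll *cpu_addr ...
 *		amdgpu_wb_free(adev, wb);
 *	}
 */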
/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Tries to place VRAM at the base address provided as a parameter
 * (which is so far either the PCI aperture address or, for IGPs,
 * the TOM base address).
 *
 * If there is not enough space to fit the non-visible VRAM in the 32-bit
 * address space, then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size;
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not all of them and avivo chips are not affected by this.
 *
 * Note: we use mc_vram_size as on some boards we need to program the MC to
 * cover the whole aperture even if the VRAM size is smaller than the
 * aperture size (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
 * of Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr; we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
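/*
 * Worked example (hypothetical numbers): with base = 0 and a 40-bit
 * mc_mask, a 4GB card keeps vram_start = 0 and vram_end = 0xffffffff,
 * since 4GB < (mc_mask - base + 1); only if mc_vram_size exceeded that
 * span would both sizes be clamped down to aper_size.
 */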
/**
 * amdgpu_gtt_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Tries to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left, then we adjust the
 * GTT size.  Thus this function never fails.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = 0;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
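/*
 * Worked example (hypothetical numbers): with VRAM at [0, 4GB) and a
 * 40-bit mc_mask, size_bf (space before VRAM) is 0 while size_af (space
 * after) is roughly 1TB - 4GB, so GTT lands right after VRAM at the
 * first gtt_base_align'ed address above vram_end.
 */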
/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup,
 * or if a post is needed because a hw reset was performed.
 * Returns true if a post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}
	/* then check MEM_SIZE, in case the crtcs are off */
	reg = RREG32(mmCONFIG_MEMSIZE);

	if (reg)
		return false;

	return true;
}
static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still need driver do vPost otherwise gpu hang, while
		 * those smc fw version above 22.15 doesn't have this flaw, so we force
		 * vpost executed for smc version below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}
	return amdgpu_need_post(adev);
}
/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for GART entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}
/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}
/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.).  See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */
/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}
/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}
/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}
/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}
/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}
/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}
/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}
/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}
/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}
/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	amdgpu_atombios_scratch_regs_init(adev);
	amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context);
	return 0;
}
/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
/**
 * amdgpu_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool amdgpu_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}
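/*
 * Aside (illustrative): the bit trick works because a power of two has a
 * single set bit, so subtracting 1 flips it and sets all lower bits;
 * e.g. 8 & 7 == 0b1000 & 0b0111 == 0.  Note it also accepts 0, which the
 * callers below pair with separate minimum-value checks.
 */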
/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1) {
		/* gtt size must be greater or equal to 32M */
		if (amdgpu_gart_size < 32) {
			dev_warn(adev->dev, "gart size (%d) too small\n",
				 amdgpu_gart_size);
			amdgpu_gart_size = -1;
		}
	}

	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI is 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(amdgpu_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (amdgpu_vm_size <= 8)
			amdgpu_vm_block_size = bits - 9;
		else
			amdgpu_vm_block_size = (bits + 3) / 2;

	} else if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = 9;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = 9;
	}

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}
}
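/*
 * Worked example (illustrative): vm_size = 64 (GB) gives
 * bits = ilog2(64) + 18 = 24 total PD+PT bits; since 64 > 8 the default
 * block size is (24 + 3) / 2 = 13 page-table bits, leaving the
 * remaining 11 bits to index the page directory.
 */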
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes the
 * asics before or after they are powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver.  Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}
static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};
int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}
int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}
bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}
struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
					    enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}
/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block's version is equal to or greater than the
 * requested (major, minor) version, or 1 if it is smaller or the
 * ip_block doesn't exist.
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}
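/*
 * Usage sketch (illustrative): IP code can gate a feature on a minimum
 * block version, e.g.
 *
 *	if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GMC, 8, 0) == 0)
 *		... GMC is at least v8.0 ...
 *
 * The 0-on-match convention mirrors strcmp-style comparisons.
 */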
/**
 * amdgpu_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		kfree(pciaddstr);

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);
	}
}
static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d\n", i);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}
static int amdgpu_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		/* gmc hw init is done early */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}
static int amdgpu_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}
static int amdgpu_fini(struct amdgpu_device *adev)
{
	int i, r;

	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_wb_fini(adev);
			amdgpu_vram_scratch_fini(adev);
		}

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
		amdgpu_virt_release_full_gpu(adev, false);
	}

	return 0;
}
int amdgpu_suspend(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	/* ungate SMC block first */
	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
					 AMD_CG_STATE_UNGATE);
	if (r) {
		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down;
		 * the SMC block was already ungated above */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return 0;
}
static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			r = adev->ip_blocks[i].version->funcs->hw_init(adev);

		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}
static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		/* COMMON, GMC and IH were already brought up in the early pass */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			continue;

		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}
static int amdgpu_resume(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (amdgpu_atombios_has_gpu_virtualization_table(adev))
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
}
/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	bool runtime = false;
	u32 max_MBps;

	adev->shutdown = false;
	adev->dev = &pdev->dev;
	adev->ddev = ddev;
	adev->pdev = pdev;
	adev->flags = flags;
	adev->asic_type = flags & AMD_ASIC_MASK;
	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
	adev->mc.gtt_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_rings = 0;
	adev->gart.gart_funcs = NULL;
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);

	adev->smc_rreg = &amdgpu_invalid_rreg;
	adev->smc_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg = &amdgpu_invalid_rreg;
	adev->pcie_wreg = &amdgpu_invalid_wreg;
	adev->pciep_rreg = &amdgpu_invalid_rreg;
	adev->pciep_wreg = &amdgpu_invalid_wreg;
	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
	adev->didt_rreg = &amdgpu_invalid_rreg;
	adev->didt_wreg = &amdgpu_invalid_wreg;
	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
	/* mutex initialization is all done here so we
	 * can recall functions without having locking issues */
	mutex_init(&adev->vm_manager.lock);
	atomic_set(&adev->irq.ih.lock, 0);
	mutex_init(&adev->pm.mutex);
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	hash_init(adev->mn_hash);

	amdgpu_check_arguments(adev);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&adev->mmio_idx_lock);
	spin_lock_init(&adev->smc_idx_lock);
	spin_lock_init(&adev->pcie_idx_lock);
	spin_lock_init(&adev->uvd_ctx_idx_lock);
	spin_lock_init(&adev->didt_idx_lock);
	spin_lock_init(&adev->gc_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);

	INIT_LIST_HEAD(&adev->shadow_list);
	mutex_init(&adev->shadow_list_lock);

	INIT_LIST_HEAD(&adev->gtt_list);
	spin_lock_init(&adev->gtt_list_lock);
	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	if (adev->asic_type >= CHIP_BONAIRE)
		/* doorbell bar mapping */
		amdgpu_doorbell_init(adev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
			break;
		}
	}
	if (adev->rio_mem == NULL)
		DRM_INFO("PCI I/O BAR is not found.\n");
	/* early init functions */
	r = amdgpu_early_init(adev);
	if (r)
		return r;

	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);

	if (amdgpu_runtime_pm == 1)
		runtime = true;
	if (amdgpu_device_is_px(ddev))
		runtime = true;
	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	/* Read BIOS */
	if (!amdgpu_get_bios(adev)) {
		r = -EINVAL;
		goto failed;
	}

	r = amdgpu_atombios_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
		goto failed;
	}

	/* detect if we are with an SRIOV vbios */
	amdgpu_device_detect_sriov_bios(adev);

	/* Post card if necessary */
	if (amdgpu_vpost_needed(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU posting now...\n");
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			goto failed;
		}
	} else {
		DRM_INFO("GPU post is not needed\n");
	}

	/* Initialize clocks */
	r = amdgpu_atombios_get_clock_info(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
		goto failed;
	}
	/* init i2c buses */
	amdgpu_atombios_i2c_init(adev);

	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev->ddev);

	r = amdgpu_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_init failed\n");
		amdgpu_fini(adev);
		goto failed;
	}

	adev->accel_working = true;
	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
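	/*
	 * Aside (illustrative): storing ilog2(max_MBps) lets the migration
	 * accounting divide by the rate with a shift, e.g. bytes >>
	 * log2_max_MBps, at the cost of rounding the limit down to a power
	 * of two (8 MB/s -> 3).
	 */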
	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		goto failed;
	}

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	amdgpu_fbdev_init(adev);

	r = amdgpu_gem_debugfs_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

	if ((amdgpu_testing & 1)) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	r = amdgpu_late_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_late_init failed\n");
		goto failed;
	}

	return 0;

failed:
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	return r;
}
/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	int r;

	DRM_INFO("amdgpu: finishing device.\n");
	adev->shutdown = true;
	drm_crtc_force_disable_all(adev->ddev);
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);
	amdgpu_ib_pool_fini(adev);
	amdgpu_fence_driver_fini(adev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_fini(adev);
	adev->accel_working = false;
	/* free i2c buses */
	amdgpu_i2c_fini(adev);
	amdgpu_atombios_fini(adev);
	kfree(adev->bios);
	adev->bios = NULL;
	vga_switcheroo_unregister_client(adev->pdev);
	if (adev->flags & AMD_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	vga_client_register(adev->pdev, NULL, NULL, NULL);
	if (adev->rio_mem)
		pci_iounmap(adev->pdev, adev->rio_mem);
	adev->rio_mem = NULL;
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	if (adev->asic_type >= CHIP_BONAIRE)
		amdgpu_doorbell_fini(adev);
	amdgpu_debugfs_regs_cleanup(adev);
}
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: suspend state
 * @fbcon: notify the fbdev of suspend
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	drm_modeset_lock_all(dev);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, false);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_amdgpu_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, false);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_suspend(adev);

	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	amdgpu_atombios_scratch_regs_save(adev);
	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	} else {
		r = amdgpu_asic_reset(adev);
		if (r)
			DRM_ERROR("amdgpu asic reset failed\n");
	}

	if (fbcon) {
		console_lock();
		amdgpu_fbdev_set_suspend(adev, 1);
		console_unlock();
	}
	return 0;
}
/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: resume state
 * @fbcon: notify the fbdev of resume
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon)
		console_lock();

	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r) {
			if (fbcon)
				console_unlock();
			return r;
		}
	}
	amdgpu_atombios_scratch_regs_restore(adev);

	/* post card */
	if (amdgpu_need_post(adev)) {
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r)
			DRM_ERROR("amdgpu asic init failed\n");
	}

	r = amdgpu_resume(adev);
	if (r) {
		DRM_ERROR("amdgpu_resume failed (%d).\n", r);
	}
	amdgpu_fence_driver_resume(adev);

	if (resume) {
		r = amdgpu_ib_ring_tests(adev);
		if (r)
			DRM_ERROR("ib ring test failed (%d).\n", r);
	}

	r = amdgpu_late_init(adev);
	if (r) {
		if (fbcon)
			console_unlock();
		return r;
	}

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, false);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj,
						  AMDGPU_GEM_DOMAIN_VRAM,
						  &amdgpu_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}

	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed.  Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	drm_helper_hpd_irq_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif

	if (fbcon) {
		amdgpu_fbdev_set_suspend(adev, 0);
		console_unlock();
	}

	return 0;
}
static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}
static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}
	return 0;
}
static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
			if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some block needs full reset!\n");
				return true;
			}
		}
	}
	return false;
}
static int amdgpu_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}
	return 0;
}
static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
		if (r)
			return r;
	}
	return 0;
}
bool amdgpu_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	return amdgpu_lockup_timeout > 0;
}
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
					   struct amdgpu_ring *ring,
					   struct amdgpu_bo *bo,
					   struct dma_fence **fence)
{
	uint32_t domain;
	int r;

	if (!bo->shadow)
		return 0;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;
	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	/* if bo has been evicted, then no need to recover */
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
						  NULL, fence, true);
		if (r)
			DRM_ERROR("recover page table failed!\n");
	}
	amdgpu_bo_unreserve(bo);
	return r;
}
/**
 * amdgpu_sriov_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 * @voluntary: if this reset is requested by the guest
 *             (true means by guest and false means by HYPERVISOR)
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
{
	int i, r = 0;
	int resched;
	struct amdgpu_bo *bo, *tmp;
	struct amdgpu_ring *ring;
	struct dma_fence *fence = NULL, *next = NULL;

	mutex_lock(&adev->virt.lock_reset);
	atomic_inc(&adev->gpu_reset_counter);
	adev->gfx.in_reset = true;

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* block scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		kthread_park(ring->sched.thread);
		amd_sched_hw_job_reset(&ring->sched);
	}

	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
	amdgpu_fence_driver_force_completion(adev);

	/* request to take full control of GPU before re-initialization */
	if (voluntary)
		amdgpu_virt_reset_gpu(adev);
	else
		amdgpu_virt_request_full_gpu(adev, true);

	/* Resume IP prior to SMC */
	amdgpu_sriov_reinit_early(adev);

	/* we need to recover gart prior to running SMC/CP/SDMA resume */
	amdgpu_ttm_recover_gart(adev);

	/* now we are okay to resume SMC/CP/SDMA */
	amdgpu_sriov_reinit_late(adev);

	amdgpu_irq_gpu_reset_resume_helper(adev);

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);

	/* release full control of GPU after ib test */
	amdgpu_virt_release_full_gpu(adev, true);

	DRM_INFO("recover vram bo from shadow\n");

	ring = adev->mman.buffer_funcs_ring;
	mutex_lock(&adev->shadow_list_lock);
	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
		next = NULL;
		amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
		if (fence) {
			r = dma_fence_wait(fence, false);
			if (r) {
				WARN(r, "recovery from shadow isn't completed\n");
				break;
			}
		}

		dma_fence_put(fence);
		fence = next;
	}
	mutex_unlock(&adev->shadow_list_lock);

	if (fence) {
		r = dma_fence_wait(fence, false);
		if (r)
			WARN(r, "recovery from shadow isn't completed\n");
	}
	dma_fence_put(fence);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		amd_sched_job_recovery(&ring->sched);
		kthread_unpark(ring->sched.thread);
	}

	drm_helper_resume_force_mode(adev->ddev);
	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset failed\n");
	}

	adev->gfx.in_reset = false;
	mutex_unlock(&adev->virt.lock_reset);
	return r;
}
/**
 * amdgpu_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
	int i, r;
	int resched;
	bool need_full_reset;

	if (amdgpu_sriov_vf(adev))
		return amdgpu_sriov_gpu_reset(adev, true);

	if (!amdgpu_check_soft_reset(adev)) {
		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
		return 0;
	}

	atomic_inc(&adev->gpu_reset_counter);

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* block scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
		amd_sched_hw_job_reset(&ring->sched);
	}
	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
	amdgpu_fence_driver_force_completion(adev);

	need_full_reset = amdgpu_need_full_reset(adev);

	if (!need_full_reset) {
		amdgpu_pre_soft_reset(adev);
		r = amdgpu_soft_reset(adev);
		amdgpu_post_soft_reset(adev);
		if (r || amdgpu_check_soft_reset(adev)) {
			DRM_INFO("soft reset failed, will fallback to full reset!\n");
			need_full_reset = true;
		}
	}

	if (need_full_reset) {
		r = amdgpu_suspend(adev);

retry:
		/* Disable fb access */
		if (adev->mode_info.num_crtc) {
			struct amdgpu_mode_mc_save save;
			amdgpu_display_stop_mc_access(adev, &save);
			amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
		}
		amdgpu_atombios_scratch_regs_save(adev);
		r = amdgpu_asic_reset(adev);
		amdgpu_atombios_scratch_regs_restore(adev);
		/* post card */
		amdgpu_atom_asic_init(adev->mode_info.atom_context);

		if (!r) {
			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
			r = amdgpu_resume(adev);
		}
	}
	if (!r) {
		amdgpu_irq_gpu_reset_resume_helper(adev);
		if (need_full_reset && amdgpu_need_backup(adev)) {
			r = amdgpu_ttm_recover_gart(adev);
			if (r)
				DRM_ERROR("gart recovery failed!!!\n");
		}
		r = amdgpu_ib_ring_tests(adev);
		if (r) {
			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
			r = amdgpu_suspend(adev);
			need_full_reset = true;
			goto retry;
		}
		/* recover the vm page tables, since we cannot rely on VRAM being
		 * consistent after a gpu full reset.
		 */
		if (need_full_reset && amdgpu_need_backup(adev)) {
			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
			struct amdgpu_bo *bo, *tmp;
			struct dma_fence *fence = NULL, *next = NULL;

			DRM_INFO("recover vram bo from shadow\n");
			mutex_lock(&adev->shadow_list_lock);
			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
				next = NULL;
				amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
				if (fence) {
					r = dma_fence_wait(fence, false);
					if (r) {
						WARN(r, "recovery from shadow isn't completed\n");
						break;
					}
				}

				dma_fence_put(fence);
				fence = next;
			}
			mutex_unlock(&adev->shadow_list_lock);
			if (fence) {
				r = dma_fence_wait(fence, false);
				if (r)
					WARN(r, "recovery from shadow isn't completed\n");
			}
			dma_fence_put(fence);
		}
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			amd_sched_job_recovery(&ring->sched);
			kthread_unpark(ring->sched.thread);
		}
	} else {
		dev_err(adev->dev, "asic resume failed (%d).\n", r);
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			if (adev->rings[i]) {
				kthread_unpark(adev->rings[i]->sched.thread);
			}
		}
	}

	drm_helper_resume_force_mode(adev->ddev);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset failed\n");
	}

	return r;
}
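/*
 * Usage sketch (illustrative; modeled on the job timeout handling in
 * amdgpu_job.c): the GPU scheduler's timeout callback is the usual caller
 * of amdgpu_gpu_reset(), invoked once a submitted job exceeds
 * amdgpu_lockup_timeout. The function name below is hypothetical.
 */
#if 0
static void example_job_timedout(struct amd_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

	amdgpu_gpu_reset(job->adev);
}
#endif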
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
	u32 mask;
	int ret;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
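	/*
	 * Note: amdgpu_pcie_gen_cap and amdgpu_pcie_lane_cap come from the
	 * pcie_gen_cap / pcie_lane_cap module parameters (see amdgpu_drv.c),
	 * so users can override the detected caps, e.g. with
	 * "amdgpu.pcie_gen_cap=<mask>" on the kernel command line.
	 */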
	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask == 0) {
		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
		if (!ret) {
			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

			if (mask & DRM_PCIE_SPEED_25)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
			if (mask & DRM_PCIE_SPEED_50)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
			if (mask & DRM_PCIE_SPEED_80)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
		} else {
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
		if (!ret) {
			switch (mask) {
			case 32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		} else {
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		}
	}
}
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev->ddev->primary->debugfs_root,
				 adev->ddev->primary);
#endif
	return 0;
}
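/*
 * Usage sketch (illustrative; names are hypothetical): components hand
 * amdgpu_debugfs_add_files() a drm_info_list array whose show callbacks
 * receive the drm_info_node via seq_file private data.
 */
#if 0
static int example_info_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct amdgpu_device *adev = node->minor->dev->dev_private;

	seq_printf(m, "gpu_reset_counter: %d\n",
		   atomic_read(&adev->gpu_reset_counter));
	return 0;
}

static const struct drm_info_list example_info_list[] = {
	{"amdgpu_example_info", example_info_show, 0, NULL},
};

/* registration: amdgpu_debugfs_add_files(adev, example_info_list, 1); */
#endif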
#if defined(CONFIG_DEBUG_FS)

static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else {
		use_bank = false;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			goto end;

		value = RREG32(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}
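/*
 * Illustrative sketch (not built; helper name is hypothetical): how a
 * banked offset for the amdgpu_regs file is assembled before seeking,
 * mirroring the decode above. Bit 62 enables SE/SH/instance banking,
 * bits 24/34/44 hold the bank indices (0x3FF selects broadcast), bit 23
 * requests the PM/PG lock, and the low 22 bits are the register byte
 * offset.
 */
#if 0
static inline loff_t example_banked_reg_offset(u64 se, u64 sh, u64 instance,
					       u64 byte_off)
{
	return (1ULL << 62) | (se << 24) | (sh << 34) | (instance << 44) |
	       (byte_off & ((1ULL << 22) - 1));
}
#endif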
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we writing registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else {
		use_bank = false;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			goto end;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		WREG32(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		/* SMC offsets are passed through untranslated, unlike the
		 * MMIO files above which use dword register indices */
		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (amdgpu_dpm == 0)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);
	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
		r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
	else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
		r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
						&valuesize);
	else
		return -EINVAL;

	if (size > valuesize)
		return -EINVAL;

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	return !r ? outsize : r;
}
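/*
 * Usage note (illustrative): each sensor occupies one dword slot, so
 * userspace reads sensor number N by seeking to byte offset N * 4 in
 * amdgpu_sensors before issuing a 4-byte read; N follows the
 * AMDGPU_PP_SENSOR_* numbering consumed by the read_sensor hooks above.
 */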
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0x7F);
	se = ((*pos >> 7) & 0xFF);
	sh = ((*pos >> 15) & 0xFF);
	cu = ((*pos >> 23) & 0xFF);
	wave = ((*pos >> 31) & 0xFF);
	simd = ((*pos >> 37) & 0xFF);

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (!x)
		return -EINVAL;

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	return result;
}
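/*
 * Illustrative sketch (not built; helper name is hypothetical): packing a
 * wave selector for the amdgpu_wave file, mirroring the decode above
 * (dword offset in bits 0-6, then se/sh/cu at bits 7/15/23 and wave/simd
 * at bits 31/37).
 */
#if 0
static inline loff_t example_wave_offset(u64 se, u64 sh, u64 cu,
					 u64 wave, u64 simd)
{
	return (se << 7) | (sh << 15) | (cu << 23) | (wave << 31) |
	       (simd << 37);
}
#endif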
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0xFFF);	/* in dwords */
	se = ((*pos >> 12) & 0xFF);
	sh = ((*pos >> 20) & 0xFF);
	cu = ((*pos >> 28) & 0xFF);
	wave = ((*pos >> 36) & 0xFF);
	simd = ((*pos >> 44) & 0xFF);
	thread = ((*pos >> 52) & 0xFF);
	bank = ((*pos >> 60) & 1);

	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	while (size) {
		uint32_t value;

		value = data[offset++];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	return result;
}
static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};
static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
};
static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (IS_ERR(ent)) {
			for (j = 0; j < i; j++) {
				debugfs_remove(adev->debugfs_regs[j]);
				adev->debugfs_regs[j] = NULL;
			}
			return PTR_ERR(ent);
		}

		/* report the MMIO aperture size for the register file */
		if (!i)
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		if (adev->debugfs_regs[i]) {
			debugfs_remove(adev->debugfs_regs[i]);
			adev->debugfs_regs[i] = NULL;
		}
	}
}
int amdgpu_debugfs_init(struct drm_minor *minor)
{
	return 0;
}
#else
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif