/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
28 #include "vega10/soc15ip.h"
29 #include "vega10/HDP/hdp_4_0_offset.h"
30 #include "vega10/HDP/hdp_4_0_sh_mask.h"
31 #include "vega10/GC/gc_9_0_sh_mask.h"
32 #include "vega10/vega10_enum.h"
34 #include "soc15_common.h"
36 #include "nbio_v6_1.h"
37 #include "nbio_v7_0.h"
38 #include "gfxhub_v1_0.h"
39 #include "mmhub_v1_0.h"
#define mmDF_CS_AON0_DramBaseAddress0						0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX					0
//DF_CS_AON0_DramBaseAddress0
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT				0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT			0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT			0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT			0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT			0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK				0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK			0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK				0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK				0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK				0xFFFFF000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI. */
#define AMDGPU_NUM_OF_VMIDS			8
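
/*
 * Golden settings consumed by amdgpu_program_register_sequence() as
 * {register offset, and-mask (bits to clear), or-value (bits to set)}
 * triples; the entries below clear the low 28 bits of ten HDP registers.
 */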
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
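
/*
 * Enable or disable the VM protection-fault interrupt sources. The fault
 * enable bits share the same layout in all sixteen per-context CNTL
 * registers, so the same mask is applied to contexts 0-15 on both the
 * GFX and MM hubs.
 */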
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
			entry->vm_id_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vm_id,
			entry->pas_id);
		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}
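
/*
 * Build a legacy (FLUSH_TYPE 0) invalidation request for a single VMID
 * that knocks down the L1 PTEs and the L2 PTE/PDE0/PDE1/PDE2 cache
 * entries; the result is written to a hub's VM_INVALIDATE_ENG*_REQ
 * register to kick off the flush.
 */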
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
{
	u32 req = 0;

	/* invalidate using legacy mode on vm_id */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vm_id);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	/* flush hdp cache */
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);

	spin_lock(&adev->mc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK. */
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;

		/* Wait for ACK with a delay. */
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->mc.invalidate_lock);
}
/**
 * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VEGA 10:
	 * 47:12 4k physical page base address
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 47:6 physical base address of PD or PTE
	 *
	 * The remaining bits carry the access flags passed in below.
	 */

	/* The following is for PTE only. GART does not have PDEs. */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}
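
/*
 * Translate the AMDGPU_VM_PAGE_* flags of a mapping request into
 * hardware PTE bits, including the VEGA10 memory type (MTYPE) field;
 * unknown memory types fall back to MTYPE_NC.
 */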
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}
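
/*
 * Convert a VRAM MC address into the physical address expected in a PDE.
 * The result must be 64-byte aligned and fit in 48 bits, which the
 * BUG_ON() below enforces.
 */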
static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
{
	addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
	BUG_ON(addr & 0xFFFF00000000003FULL);
	return addr;
}
static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};
static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}
static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gart_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	return 0;
}
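
/*
 * Hand out a VM invalidation engine to each ring on its hub, starting
 * at engine 3; engine 17 is reserved for GART flushes, so exhausting
 * the engine range trips the BUG_ON() below.
 */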
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
	unsigned i;

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 17 is used for GART flushes */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 17);

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	amdgpu_vram_location(adev, &adev->mc, base);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU)
		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
	else
		adev->vm_manager.vram_base_offset = 0;
}
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	if (!adev->mc.vram_width) {
		/* hbm memory channel size */
		chansize = 128;

		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
		tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
		/* decode the DF channel interleave setting into a channel count */
		{
			static const int intlv_numchan[] = {
				1, 2, 0, 4, 0, 8, 0, 16, 2,
			};

			numchan = (tmp < ARRAY_SIZE(intlv_numchan)) ?
				  intlv_numchan[tmp] : 1;
		}
		adev->mc.vram_width = numchan * chansize;
	}

	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB */
	adev->mc.mc_vram_size =
		((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
		 nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = adev->mc.mc_vram_size;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	amdgpu_gart_set_defaults(adev);
	gmc_v9_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}
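
/*
 * gmc_v9_0_gart_init - allocate and size the GART table
 *
 * One 64-bit PTE per GPU page, hence a table size of num_gpu_pages * 8
 * bytes; GART pages are mapped uncached (MTYPE_UC) and executable.
 */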
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}
static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	mmhub_v1_0_init(adev);

	spin_lock_init(&adev->mc.invalidate_lock);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			adev->vm_manager.vm_size = 1U << 18;
			adev->vm_manager.block_size = 9;
			adev->vm_manager.num_level = 3;
		} else {
			/* vm_size is 64GB for legacy 2-level page support */
			amdgpu_vm_adjust_size(adev, 64);
			adev->vm_manager.num_level = 1;
		}
		break;
	case CHIP_VEGA10:
		/* XXX Don't know how to get VRAM type yet. */
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		adev->vm_manager.vm_size = 1U << 18;
		adev->vm_manager.block_size = 9;
		adev->vm_manager.num_level = 3;
		break;
	default:
		break;
	}

	DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
		 adev->vm_manager.vm_size,
		 adev->vm_manager.block_size);

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
			      &adev->mc.vm_fault);
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
			      &adev->mc.vm_fault);
	if (r)
		return r;

	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/*
	 * It needs to reserve 8M stolen memory for vega10
	 * TODO: Figure out how to avoid that...
	 */
	adev->mc.stolen_size = 8 * 1024 * 1024;

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}
/**
 * gmc_v9_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v9_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		/* the HDP golden settings are applied in gart_enable() */
		break;
	default:
		break;
	}
}
/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_program_register_sequence(adev,
					 golden_settings_vega10_hdp,
					 (const u32)ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	/* After HDP is initialized, flush HDP. */
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		mmhub_v1_0_initialize_power_gating(adev);
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
	WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);

	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}
static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}
static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_hw_fini(adev);

	return 0;
}
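
/*
 * On resume the hardware VMID state did not survive, so after the GART
 * is re-enabled all VMID to page-table assignments are reset and every
 * client has to reacquire its VMID.
 */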
static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vm_reset_all_ids(adev);

	return 0;
}
static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation. */
	return 0;
}
static int gmc_v9_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return mmhub_v1_0_set_clockgating(adev, state);
}
static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v1_0_get_clockgating(adev, flags);
}
static int gmc_v9_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};
const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};