/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v11_0.h"
#include "athub/athub_3_0_0_sh_mask.h"
#include "athub/athub_3_0_0_offset.h"
#include "oss/osssys_6_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"
#include "soc15_common.h"
#include "nbio_v4_3.h"
#include "gfxhub_v3_0.h"
#include "mmhub_v3_0.h"
#include "mmhub_v3_0_2.h"
#include "athub_v3_0.h"

static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int
gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
		break;
	default:
		break;
	}

	return 0;
}

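/*
 * Handle a GPUVM protection fault delivered through the IH ring: decode the
 * faulting page address from the IV entry payload, latch and clear the L2
 * protection fault status, and emit a rate-limited diagnostic naming the
 * offending process.
 */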
static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v11_0_irq_funcs = {
	.set = gmc_v11_0_vm_fault_interrupt_state,
	.process = gmc_v11_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v11_0_ecc_funcs = {
	.set = gmc_v11_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs;
	}
}

/**
 * gmc_v11_0_use_invalidate_semaphore - decide whether to use the
 * invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 */
static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0) &&
		(!amdgpu_sriov_vf(adev)));
}

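/*
 * Look up which PASID is currently bound to @vmid by reading the IH
 * VMID-to-PASID LUT. Returns true if a non-zero PASID mapping exists.
 */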
static bool gmc_v11_0_get_vmid_pasid_mapping_info(struct amdgpu_device *adev,
						  uint8_t vmid, uint16_t *p_pasid)
{
	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;

	return !!(*p_pasid);
}

/*
 * VMID 0 holds the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

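/*
 * Flush one VM hub via MMIO: optionally take the per-engine invalidation
 * semaphore, write the invalidate request for invalidation engine 17, poll
 * the per-VMID ACK bit, then release the semaphore. The MMHUB additionally
 * gets a private-cache invalidation.
 */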
static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned int i;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPU may lose the GPUVM invalidate acknowledge state across a
	 * power-gating cycle. Acquire the semaphore before the invalidation
	 * and release it afterwards so the block cannot enter a power-gated
	 * state while the invalidation is in flight.
	 */

	/* TODO: semaphore handling for the GFXHUB still needs debugging. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquired */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
					    hub->eng_distance * eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
				    hub->eng_distance * eng);
		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}

	/* TODO: semaphore handling for the GFXHUB still needs debugging. */
	if (use_semaphore)
		/* a write of 0 releases the semaphore */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
			      hub->eng_distance * eng, 0);

	/* Issue an additional private vm invalidation to the MMHUB */
	if ((vmhub != AMDGPU_GFXHUB_0) &&
	    (hub->vm_l2_bank_select_reserved_cid2)) {
		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
		/* bit 25: RESERVED_CACHE_PRIVATE_INVALIDATION */
		inv_req |= (1 << 25);
		/* Issue the private invalidation */
		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
		/* Read back to ensure the invalidation is done */
		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	if ((vmhub == AMDGPU_GFXHUB_0) && !adev->gfx.is_poweron)
		return;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/*
	 * At SRIOV run time the driver shouldn't access registers through
	 * MMIO; use the KIQ to perform the VM invalidation instead.
	 */
	if (adev->gfx.kiq.ring.sched.ready && !adev->enable_mes &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);
	gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
	mutex_unlock(&adev->mman.gtt_window_lock);
}

/**
 * gmc_v11_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: PASID to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	for (vmid = 1; vmid < 16; vmid++) {
		ret = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
							    &queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v11_0_flush_gpu_tlb(adev, vmid,
								i, flush_type);
			} else {
				gmc_v11_0_flush_gpu_tlb(adev, vmid,
							AMDGPU_GFXHUB_0,
							flush_type);
			}
		}
	}

	return 0;
}

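/*
 * Ring-based counterpart of the MMIO flush above: emit the page-directory
 * base programming and the invalidate request/ACK wait as ring packets so
 * the flush executes in-band with the submission.
 */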
static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPU may lose the GPUVM invalidate acknowledge state across a
	 * power-gating cycle. Acquire the semaphore before the invalidation
	 * and release it afterwards so the block cannot enter a power-gated
	 * state while the invalidation is in flight.
	 */

	/* TODO: semaphore handling for the GFXHUB still needs debugging. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquired */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: semaphore handling for the GFXHUB still needs debugging. */
	if (use_semaphore)
		/* a write of 0 releases the semaphore */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

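/*
 * Emit a ring write that records the VMID-to-PASID binding in the IH LUT,
 * picking the GFX or MM variant of the LUT based on the ring's vmhub.
 */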
static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* MES fw manages IH_VMID_x_LUT updating */
	if (ring->is_mes_queue)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format:
 * 47:12 4k physical page base address
 *
 * PDE format:
 * 63:59 block fragment size
 * 47:6 physical base address of PD or PTE
 */

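/*
 * Translate the AMDGPU_VM_MTYPE_* UAPI flags into the NV10-format MTYPE
 * PTE bits; anything unrecognized falls back to MTYPE_NC.
 */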
static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}

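/*
 * Massage a PDE before it is written: relocate VRAM addresses into the MC
 * address space and, when further translation is enabled, apply the block
 * fragment size at PDB1 or the translate-further bit at PDB0.
 */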
static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);
	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

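/*
 * Build the final PTE flags for a mapping: take the executable, MTYPE and
 * NOALLOC bits from the mapping's flags, and mark PRT mappings as snooped,
 * logged system pages with the valid bit cleared.
 */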
static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	*flags &= ~AMDGPU_PTE_NOALLOC;
	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}
}

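/* No VBIOS-reserved framebuffer region needs to be preserved on GMC v11. */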
static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	return 0;
}

static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v11_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
	.map_mtype = gmc_v11_0_map_mtype,
	.get_vm_pde = gmc_v11_0_get_vm_pde,
	.get_vm_pte = gmc_v11_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
};

static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs;
}

static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(8, 10, 0):
	case IP_VERSION(8, 11, 0):
	default:
		break;
	}
}

static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(3, 0, 2):
		adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v3_0_funcs;
		break;
	}
}

static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
}

static int gmc_v11_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v11_0_set_gfxhub_funcs(adev);
	gmc_v11_0_set_mmhub_funcs(adev);
	gmc_v11_0_set_gmc_funcs(adev);
	gmc_v11_0_set_irq_funcs(adev);
	gmc_v11_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v11_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;
	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;
	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

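/*
 * Place VRAM and GART in the GPU's physical address space, using the
 * framebuffer location reported by the MMHUB as the VRAM base.
 */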
static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = adev->mmhub.funcs->get_fb_location(adev);

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}

/**
 * gmc_v11_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v11_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

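/**
 * gmc_v11_0_gart_init - allocate the GART table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the GART page table in VRAM and set the default PTE flags
 * for its entries (uncached MTYPE, executable).
 */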
static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v11_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type, &vram_vendor);
	adev->gmc.vram_width = vram_width;
	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
		adev->num_vmhubs = 2;
		/*
		 * To fulfill 4-level page support, the vm size is 256TB
		 * (48 bit, maximum size) with a block size of 512 (9 bit).
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	r = gmc_v11_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v11_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * VMID 0 is reserved for System,
	 * amdgpu graphics/compute will use VMIDs 1-7,
	 * amdkfd will use VMIDs 8-15.
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v11_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v11_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v11_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
}

/**
 * gmc_v11_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
{
	bool value;
	int r;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;
	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v11_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* The sequence of these two function calls matters. */
	gmc_v11_0_init_golden_registers(adev);

	r = gmc_v11_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v11_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
{
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v11_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v11_0_gart_disable(adev);

	return 0;
}

static int gmc_v11_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v11_0_hw_fini(adev);

	return 0;
}

static int gmc_v11_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = gmc_v11_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v11_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v11. */
	return true;
}

static int gmc_v11_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v11. */
	return 0;
}

static int gmc_v11_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v11_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	return athub_v3_0_set_clockgating(adev, state);
}

static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v3_0_get_clockgating(adev, flags);
}

static int gmc_v11_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v11_0_ip_funcs = {
	.name = "gmc_v11_0",
	.early_init = gmc_v11_0_early_init,
	.sw_init = gmc_v11_0_sw_init,
	.hw_init = gmc_v11_0_hw_init,
	.late_init = gmc_v11_0_late_init,
	.sw_fini = gmc_v11_0_sw_fini,
	.hw_fini = gmc_v11_0_hw_fini,
	.suspend = gmc_v11_0_suspend,
	.resume = gmc_v11_0_resume,
	.is_idle = gmc_v11_0_is_idle,
	.wait_for_idle = gmc_v11_0_wait_for_idle,
	.soft_reset = gmc_v11_0_soft_reset,
	.set_clockgating_state = gmc_v11_0_set_clockgating_state,
	.set_powergating_state = gmc_v11_0_set_powergating_state,
	.get_clockgating_state = gmc_v11_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v11_0_ip_funcs,
};