/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"

#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "gfxhub_v2_1.h"
#include "mmhub_v2_0.h"
#include "mmhub_v2_3.h"
#include "athub_v2_0.h"
#include "athub_v2_1.h"

#include "amdgpu_reset.h"
static const struct soc15_reg_golden golden_settings_navi10_hdp[] = {
	/* TODO: add golden settings for HDP */
};
static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned int type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}
static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned int type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
		break;
	default:
		break;
	}

	return 0;
}
static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
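	/* Bit 7 of src_data[1] flags a retryable fault, bit 5 a write fault. */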
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	bool write_fault = !!(entry->src_data[1] & 0x20);
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	struct amdgpu_task_info task_info;
	uint32_t status = 0;
	u64 addr;

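	/* Reassemble the 48-bit faulting address: src_data[0] carries bits
	 * 43:12 of the page-aligned address and the low nibble of
	 * src_data[1] carries bits 47:44.
	 */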
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
	if (retry_fault) {
		/* Returning 1 here also prevents sending the IV to the KFD */

		/* Process it only if it's the first fault for this address */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
					     entry->timestamp))
			return 1;

		/* Delegate it to a different ring if the hardware hasn't
		 * already done it.
		 */
		if (entry->ih == &adev->irq.ih) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* Try to handle the recoverable page faults by filling page
		 * tables.
		 */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, write_fault))
			return 1;
	}
	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
		    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
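		/* Writing 1 to bit 0 of the fault control register below
		 * clears the latched fault status so the next fault can be
		 * captured.
		 */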
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}
	if (!printk_ratelimit())
		return 0;

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
		"for process %s pid %d thread %s pid %d)\n",
		entry->vmid_src ? "mmhub" : "gfxhub",
		entry->src_id, entry->ring_id, entry->vmid,
		entry->pasid, task_info.process_name, task_info.tgid,
		task_info.task_name, task_info.pid);
	dev_err(adev->dev, " in page starting at address 0x%016llx from client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);
	if (!amdgpu_sriov_vf(adev))
		hub->vmhub_funcs->print_l2_protection_fault_status(adev,
								   status);

	return 0;
}
static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
	.set = gmc_v10_0_vm_fault_interrupt_state,
	.process = gmc_v10_0_process_interrupt,
};
static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
	.set = gmc_v10_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};
static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
	}
}
/**
 * gmc_v10_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 */
static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB0(0)) &&
		(!amdgpu_sriov_vf(adev)));
}
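/*
 * Look up which PASID is mapped to @vmid in the ATHUB and report whether the
 * mapping is currently valid.
 */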
static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
						      uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		       + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
/*
 * VMID 0 holds the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned int eng = 17;
	unsigned int i;
	unsigned char hub_ip = 0;

	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;
	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * It may lose the gpuvm invalidate acknowledge state across a
	 * power-gating off cycle; add a semaphore acquire before the
	 * invalidation and a semaphore release after it so the hardware does
	 * not enter a power-gated state while an acknowledgement is pending.
	 */

	/* TODO: the semaphore path still needs debugging before it can be enabled for the GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
						hub->eng_distance * eng, hub_ip);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}
	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
			  hub->eng_distance * eng,
			  inv_req, hub_ip);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if ((vmhub == AMDGPU_GFXHUB(0)) &&
	    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
		RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
				  hub->eng_distance * eng, hub_ip);
	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
					hub->eng_distance * eng, hub_ip);
		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}
	/* TODO: the semaphore path still needs debugging before it can be enabled for the GFXHUB as well. */
	if (use_semaphore)
		/*
		 * Add semaphore release after invalidation;
		 * writing 0 releases the semaphore.
		 */
		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
				  hub->eng_distance * eng, 0, hub_ip);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush hub: %d!\n", vmhub);
}
/**
 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence;
	struct amdgpu_job *job;
	int r;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);
	/* At SRIOV run time the driver must not access registers through
	 * MMIO; use the KIQ to do the VM invalidation instead.
	 */
	if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    down_read_trylock(&adev->reset_domain->sem)) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned int eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);
		up_read(&adev->reset_domain->sem);
		return;
	}
	mutex_lock(&adev->mman.gtt_window_lock);

	if (vmhub == AMDGPU_MMHUB0(0)) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB0(0), 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	BUG_ON(vmhub != AMDGPU_GFXHUB(0));

	if (!adev->mman.buffer_funcs_enabled ||
	    !adev->ib_pool_ready ||
	    amdgpu_in_reset(adev) ||
	    !ring->sched.ready) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB(0), 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}
	/* The SDMA on Navi has a bug which can theoretically result in memory
	 * corruption if an invalidation happens at the same time as a VA
	 * translation. Avoid this by doing the invalidation from the SDMA
	 * itself.
	 */
	r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
				     &job);
	if (r)
		goto error_alloc;
	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
	job->vm_needs_flush = true;
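	/* The IB body is a single NOP; the actual TLB flush happens in the
	 * VM flush that precedes the job, since vm_needs_flush is set and
	 * vm_pd_addr points at the GART page directory.
	 */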
	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	fence = amdgpu_job_submit(job);

	mutex_unlock(&adev->mman.gtt_window_lock);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return;

error_alloc:
	mutex_unlock(&adev->mman.gtt_window_lock);
	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}
/**
 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs; used with PACKET3_INVALIDATE_TLBS_ALL_HUB()
 * @inst: which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub, uint32_t inst)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
	struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
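	/* Prefer a KIQ-based TLB invalidation when the KIQ ring is usable
	 * (not in emulation mode); otherwise fall back to flushing each VMID
	 * bound to this PASID below.
	 */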
	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq[0].ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq[0].ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq[0].ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}
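	/* Fallback: scan the ATC VMID-PASID mappings and flush every VMID
	 * that currently carries this PASID.
	 */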
	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
								&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
					gmc_v10_0_flush_gpu_tlb(adev, vmid,
								i, flush_type);
			} else {
				gmc_v10_0_flush_gpu_tlb(adev, vmid,
							AMDGPU_GFXHUB(0), flush_type);
			}
			if (!adev->enable_mes)
				break;
		}
	}

	return 0;
}
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned int vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned int eng = ring->vm_inv_eng;
	/*
	 * It may lose the gpuvm invalidate acknowledge state across a
	 * power-gating off cycle; add a semaphore acquire before the
	 * invalidation and a semaphore release after it so the hardware does
	 * not enter a power-gated state while an acknowledgement is pending.
	 */

	/* TODO: the semaphore path still needs debugging before it can be enabled for the GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);
	/* TODO: the semaphore path still needs debugging before it can be enabled for the GFXHUB as well. */
	if (use_semaphore)
		/*
		 * Add semaphore release after invalidation;
		 * writing 0 releases the semaphore.
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}
static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
					 unsigned int pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* MES firmware manages IH_VMID_x_LUT updating */
	if (ring->is_mes_queue)
		return;

	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}
/*
 * PTE format on NAVI 10:
 * 58 reserved; on sienna_cichlid this bit is used for MALL noalloc
 * 47:12 4k physical page base address
 *
 * PDE format on NAVI 10:
 * 63:59 block fragment size
 * 47:6 physical base address of PD or PTE
 */
static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
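	/* PDE/PTE base addresses must be 64-byte aligned and fit within 48
	 * bits, hence the high-16-bit and low-6-bit mask below.
	 */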
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);
	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	*flags &= ~AMDGPU_PTE_NOALLOC;
	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
			       AMDGPU_GEM_CREATE_UNCACHED))
		*flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) |
			 AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
}
static unsigned int gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned int size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}

	return size;
}
static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.map_mtype = gmc_v10_0_map_mtype,
	.get_vm_pde = gmc_v10_0_get_vm_pde,
	.get_vm_pte = gmc_v10_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
};
static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (!adev->gmc.gmc_funcs)
		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}
static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(8, 7, 0):
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
		adev->umc.retire_unit = 1;
		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v8_7_ras;
		break;
	default:
		break;
	}
}
static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 4, 0):
	case IP_VERSION(2, 4, 1):
		adev->mmhub.funcs = &mmhub_v2_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v2_0_funcs;
		break;
	}
}
static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
		break;
	}
}
static int gmc_v10_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_mmhub_funcs(adev);
	gmc_v10_0_set_gfxhub_funcs(adev);
	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);
	gmc_v10_0_set_umc_funcs(adev);
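	/* 4 GB shared and 4 GB private apertures in the canonical 64-bit
	 * GPUVM address space.
	 */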
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}
static int gmc_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base;

	base = adev->gfxhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* add the xgmi offset of the physical node */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* the NBIO memsize register reports the VRAM size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}

	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->ip_versions[GC_HWIP][0]) {
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case IP_VERSION(10, 3, 1): /* DCE SG support */
		case IP_VERSION(10, 3, 3): /* DCE SG support */
		case IP_VERSION(10, 3, 6): /* DCE SG support */
		case IP_VERSION(10, 3, 7): /* DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
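	/* Each GART PTE is 8 bytes; GART pages are mapped uncached (UC) and
	 * executable.
	 */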
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}
static int gmc_v10_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
		adev->gmc.vram_width = 64;
	} else if (amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
	} else {
		r = amdgpu_atomfirmware_get_vram_info(adev,
						      &vram_width, &vram_type, &vram_vendor);
		adev->gmc.vram_width = vram_width;
		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}
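	/* MALL (Memory Access at Last Level) cache size per chip */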
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 3, 0):
		adev->gmc.mall_size = 128 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 2):
		adev->gmc.mall_size = 96 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 4):
		adev->gmc.mall_size = 32 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 5):
		adev->gmc.mall_size = 16 * 1024 * 1024;
		break;
	default:
		adev->gmc.mall_size = 0;
		break;
	}
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
		/*
		 * To fulfill 4-level page table support, the VM size is set
		 * to 256TB (48 bits), the maximum for Navi10/Navi14/Navi12,
		 * with a block size of 512 (9 bits).
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}
	/* This interrupt is a VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;
	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}
	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
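	/* GMC v10 addresses system memory through a 44-bit DMA mask */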
	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);
	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * VMID 0 is reserved for System,
	 * amdgpu graphics/compute will use VMIDs 1-7,
	 * amdkfd will use VMIDs 8-15.
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	r = amdgpu_gmc_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}
/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}
static int gmc_v10_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}
static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
}
/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (!adev->gart.bo) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	if (!adev->in_s0ix) {
		r = adev->gfxhub.funcs->gart_enable(adev);
		if (r)
			return r;
	}

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->init_registers(adev);

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);
	value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;

	if (!adev->in_s0ix)
		adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
	if (!adev->in_s0ix)
		gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned int)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}
static int gmc_v10_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* The sequence of these two function calls matters. */
	gmc_v10_0_init_golden_registers(adev);

	/*
	 * Harvestable groups in gc_utcl2 need to be programmed before any GFX
	 * block register setup within GMC, or else the system hangs when
	 * harvesting SA.
	 */
	if (!adev->in_s0ix && adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
		adev->gfxhub.funcs->utcl2_harvest(adev);

	r = gmc_v10_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1) {
		r = amdgpu_gmc_vram_checking(adev);
		if (r)
			return r;
	}

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}
/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	if (!adev->in_s0ix)
		adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
}
static int gmc_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	return 0;
}
static int gmc_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_hw_fini(adev);

	return 0;
}
static int gmc_v10_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v10_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}
static bool gmc_v10_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v10. */
	return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v10. */
	return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
	return 0;
}
static int gmc_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * The issue where the MMHUB cannot disconnect from the DF while MMHUB
	 * clock gating is disabled was first observed on DF 3.0.3; the same
	 * suspend sequence shows no such issue on DF 3.0.2 series platforms.
	 */
	if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) {
		dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n");
		return 0;
	}

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
		return athub_v2_1_set_clockgating(adev, state);

	return athub_v2_0_set_clockgating(adev, state);
}
static void gmc_v10_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3) ||
	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 4))
		return;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
		athub_v2_1_get_clockgating(adev, flags);
	else
		athub_v2_0_get_clockgating(adev, flags);
}
static int gmc_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}
const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.soft_reset = gmc_v10_0_soft_reset,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};
const struct amdgpu_ip_block_version gmc_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};