/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v11_0.h"
#include "umc_v8_10.h"
#include "athub/athub_3_0_0_sh_mask.h"
#include "athub/athub_3_0_0_offset.h"
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
#include "oss/osssys_6_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"
#include "soc15.h"
#include "soc15_common.h"
#include "nbio_v4_3.h"
#include "gfxhub_v3_0.h"
#include "gfxhub_v3_0_3.h"
#include "gfxhub_v11_5_0.h"
#include "mmhub_v3_0.h"
#include "mmhub_v3_0_1.h"
#include "mmhub_v3_0_2.h"
#include "mmhub_v3_3.h"
#include "athub_v3_0.h"

static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned int type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int
gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned int type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
			       AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB(0))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
	}

	return 0;
}
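
/*
 * Worked example of the address reconstruction above: the IV payload splits
 * the page-aligned fault address across two dwords, with src_data[0]
 * carrying bits 43:12 and the low nibble of src_data[1] carrying bits 47:44.
 * With src_data[0] = 0x12345 and src_data[1] = 0x3:
 *
 *	addr = ((u64)0x12345 << 12) | ((u64)0x3 << 44) = 0x300012345000
 */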

static const struct amdgpu_irq_src_funcs gmc_v11_0_irq_funcs = {
	.set = gmc_v11_0_vm_fault_interrupt_state,
	.process = gmc_v11_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v11_0_ecc_funcs = {
	.set = gmc_v11_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs;
	}
}

/**
 * gmc_v11_0_use_invalidate_semaphore - judge whether to use semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB0(0)) &&
		(!amdgpu_sriov_vf(adev)));
}

static bool gmc_v11_0_get_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;

	return !!(*p_pasid);
}
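
/*
 * Each VMID has an IH LUT entry holding the 16-bit PASID currently mapped
 * to it, so a nonzero result means the VMID is in use. The PASID-based TLB
 * flush below relies on this helper to find which VMIDs to invalidate.
 */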

/*
 * GART
 * VMID 0 holds the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned int eng = 17;
	unsigned int i;
	unsigned char hub_ip = 0;

	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ?
		 GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * It may lose gpuvm invalidate acknowledge state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to work around the issue.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
						hub->eng_distance * eng, hub_ip);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
					hub->eng_distance * eng, hub_ip);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
				  hub->eng_distance * eng, 0, hub_ip);

	/* Issue additional private vm invalidation to MMHUB */
	if ((vmhub != AMDGPU_GFXHUB(0)) &&
	    (hub->vm_l2_bank_select_reserved_cid2) &&
	    !amdgpu_sriov_vf(adev)) {
		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
		/* bit 25: RSERVED_CACHE_PRIVATE_INVALIDATION */
		inv_req |= (1 << 25);
		/* Issue private invalidation */
		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
		/* Read back to ensure invalidation is done */
		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
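
/*
 * The register sequence above, in short (MMHUB-only steps in brackets):
 *
 *   1. [poll vm_inv_eng0_sem until it reads 1]      semaphore acquire
 *   2. write inv_req to vm_inv_eng0_req             start invalidation
 *   3. poll vm_inv_eng0_ack for bit (1 << vmid)     wait for completion
 *   4. [write 0 to vm_inv_eng0_sem]                 semaphore release
 */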

/**
 * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
		return;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* For SRIOV run time, driver shouldn't access the register through MMIO.
	 * Directly use the KIQ to do the vm invalidation instead.
	 */
	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned int eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid, GET_INST(GC, 0));
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);
	gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
	mutex_unlock(&adev->mman.gtt_window_lock);
}

/**
 * gmc_v11_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: is used to select which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub, uint32_t inst)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];

	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq[0].ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					      pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq[0].ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq[0].ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	for (vmid = 1; vmid < 16; vmid++) {
		ret = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
							    &queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
					gmc_v11_0_flush_gpu_tlb(adev, vmid,
								i, flush_type);
			} else {
				gmc_v11_0_flush_gpu_tlb(adev, vmid,
							AMDGPU_GFXHUB(0), flush_type);
			}
		}
	}

	return 0;
}
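
/*
 * Two paths above: on real silicon with a live KIQ ring the flush is issued
 * as a single KIQ packet and fenced; otherwise the driver walks VMIDs 1-15,
 * matches each VMID's LUT entry against the requested PASID, and flushes the
 * matching VMIDs through the MMIO path.
 */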

static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned int vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned int eng = ring->vm_inv_eng;

	/*
	 * It may lose gpuvm invalidate acknowledge state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to work around the issue.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}
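
/*
 * This mirrors the MMIO sequence of gmc_v11_0_flush_vm_hub(), but emitted as
 * ring packets, so the flush executes in-band right before the submission
 * that depends on the new page directory at pd_addr.
 */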

static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
					 unsigned int pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* MES fw manages IH_VMID_x_LUT updating */
	if (ring->is_mes_queue)
		return;

	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format:
 * 63:59 reserved
 * 58:57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
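
/*
 * Illustrative encoding (values chosen for the example, not taken from
 * hardware): a valid, snooped, readable and writable system page at
 * physical address 0x12345000 with MTYPE_NC (0) would encode as
 *
 *	0x12345000 | (1 << 0) | (1 << 1) | (1 << 2) | (1 << 5) | (1 << 6)
 *	= 0x12345067
 */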

static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}
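
/*
 * Illustrative use: the amdgpu_vm code calls this hook to translate a UAPI
 * mapping flag into PTE mtype bits, e.g.
 *
 *	pte_flags |= gmc_v11_0_map_mtype(adev, AMDGPU_VM_MTYPE_WC);
 *
 * which sets the mtype field (PTE bits 50:48) to MTYPE_WC; anything
 * unrecognized falls back to MTYPE_NC.
 */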

static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
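
/*
 * AMDGPU_PDE_BFS(0x9) above declares a block fragment size of 9, i.e. each
 * fragment under that PDB1 entry covers 2^9 contiguous 4K pages (2MB),
 * matching the 9-bit block size passed to amdgpu_vm_adjust_size() in sw_init.
 */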

static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	*flags &= ~AMDGPU_PTE_NOALLOC;
	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
			       AMDGPU_GEM_CREATE_UNCACHED))
		*flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) |
			 AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
}

static unsigned int gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL);
	unsigned int size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}

	return size;
}
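
/*
 * Example: a 1080-line viewport with a 1920-pixel pitch reserves
 * 1080 * 1920 * 4 bytes (~7.9MB) for the vbios framebuffer; with VGA mode
 * enabled the fixed AMDGPU_VBIOS_VGA_ALLOCATION is used instead.
 */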

static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v11_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
	.map_mtype = gmc_v11_0_map_mtype,
	.get_vm_pde = gmc_v11_0_get_vm_pde,
	.get_vm_pte = gmc_v11_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
};

static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs;
}

static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(8, 10, 0):
		adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
		adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
		adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
		if (adev->umc.node_inst_num == 4)
			adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0];
		else
			adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0];
		adev->umc.ras = &umc_v8_10_ras;
		break;
	case IP_VERSION(8, 11, 0):
		break;
	default:
		break;
	}
}

static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(3, 0, 1):
		adev->mmhub.funcs = &mmhub_v3_0_1_funcs;
		break;
	case IP_VERSION(3, 0, 2):
		adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
		break;
	case IP_VERSION(3, 3, 0):
		adev->mmhub.funcs = &mmhub_v3_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v3_0_funcs;
		break;
	}
}

static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 3):
		adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs;
		break;
	case IP_VERSION(11, 5, 0):
		adev->gfxhub.funcs = &gfxhub_v11_5_0_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
		break;
	}
}

static int gmc_v11_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v11_0_set_gfxhub_funcs(adev);
	gmc_v11_0_set_mmhub_funcs(adev);
	gmc_v11_0_set_gmc_funcs(adev);
	gmc_v11_0_set_irq_funcs(adev);
	gmc_v11_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

	return 0;
}
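
/*
 * (4ULL << 30) is 4GB, so the apertures initialized above span
 * 0x2000000000000000..0x20000000ffffffff (shared) and
 * 0x1000000000000000..0x10000000ffffffff (private).
 */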

static int gmc_v11_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->mmhub.funcs->get_fb_location(adev);

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	if (amdgpu_sriov_vf(adev))
		adev->vm_manager.vram_base_offset = 0;
	else
		adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}

/**
 * gmc_v11_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() returns the size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v11_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
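
/*
 * amdgpu_gart_size is a module parameter in MB; the -1 default above selects
 * 512ULL << 20 = 512MB of GART, while nbio get_memsize() reports VRAM in MB
 * and is scaled to bytes for mc_vram_size.
 */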

static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v11_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type, &vram_vendor);
	adev->gmc.vram_width = vram_width;
	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v11_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v11_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	r = amdgpu_gmc_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}
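
/*
 * The amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48) call above requests
 * 256 * 1024 GB = 256TB of VM space, exactly the 48-bit virtual range
 * (2^48 bytes), with a default 9-bit (2MB) block size, matching the 4-level
 * page support noted in the comment there.
 */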

/**
 * gmc_v11_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v11_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v11_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];

		WREG32(hub->vm_contexts_disable, 0);
		return;
	}
}

/**
 * gmc_v11_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned int)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v11_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v11_0_init_golden_registers(adev);

	r = gmc_v11_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v11_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
{
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v11_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v11_0_gart_disable(adev);

	return 0;
}

static int gmc_v11_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v11_0_hw_fini(adev);

	return 0;
}

static int gmc_v11_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v11_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v11_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v11. */
	return true;
}

static int gmc_v11_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v11. */
	return 0;
}

static int gmc_v11_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v11_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	return athub_v3_0_set_clockgating(adev, state);
}

static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v3_0_get_clockgating(adev, flags);
}

static int gmc_v11_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v11_0_ip_funcs = {
	.name = "gmc_v11_0",
	.early_init = gmc_v11_0_early_init,
	.sw_init = gmc_v11_0_sw_init,
	.hw_init = gmc_v11_0_hw_init,
	.late_init = gmc_v11_0_late_init,
	.sw_fini = gmc_v11_0_sw_fini,
	.hw_fini = gmc_v11_0_hw_fini,
	.suspend = gmc_v11_0_suspend,
	.resume = gmc_v11_0_resume,
	.is_idle = gmc_v11_0_is_idle,
	.wait_for_idle = gmc_v11_0_wait_for_idle,
	.soft_reset = gmc_v11_0_soft_reset,
	.set_clockgating_state = gmc_v11_0_set_clockgating_state,
	.set_powergating_state = gmc_v11_0_set_powergating_state,
	.get_clockgating_state = gmc_v11_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v11_0_ip_funcs,
};