/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_fiji_a10,
						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	default:
		break;
	}
}
/**
 * gmc_v8_0_mc_wait_for_idle - wait for MC idle callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC (memory controller) to be idle.
 * (VI).
 * Returns 0 if the MC is idle, -1 if not.
 */
int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
		      struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	amdgpu_asic_wait_for_mc_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}
void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
			struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}
/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		return 0;
	default: BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		printk(KERN_ERR
		       "mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}
/**
 * gmc_v8_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running, blackout = 0;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 */
	if (adev->virtualization.supports_sr_iov)
		return 0;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		if (running) {
			blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
		}

		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}

		if (running)
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
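/*
 * Illustrative sketch (not part of the driver): the size arithmetic used
 * in gmc_v8_0_mc_load_microcode() above.  The io_debug section of the MC
 * firmware image is a list of (index, data) register pairs, so the entry
 * count is io_debug_size_bytes / (4 * 2), two u32 words per entry, while
 * the ucode section is plain u32 words, ucode_size_bytes / 4.  The helper
 * name below is hypothetical.
 */
static inline void gmc_v8_0_example_fw_sizes(u32 io_debug_size_bytes,
					     u32 ucode_size_bytes,
					     int *regs_size, int *ucode_size)
{
	*regs_size = io_debug_size_bytes / (4 * 2);	/* (index, data) pairs */
	*ucode_size = ucode_size_bytes / 4;		/* u32 words */
}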
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}
/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v8_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	gmc_v8_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}
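/*
 * Illustrative sketch (not part of the driver): the MC_VM_FB_LOCATION
 * packing done in gmc_v8_0_mc_program() above.  Base and top of the
 * framebuffer aperture are stored as 16-bit fields in 16 MB (>> 24)
 * units, top in bits 31:16 and base in bits 15:0.  For vram_start = 0
 * and vram_end = 0xFFFFFFFF (4 GB - 1, an assumed example) this yields
 * (0xFF << 16) | 0x00 == 0x00FF0000.  The helper name is hypothetical.
 */
static inline u32 gmc_v8_0_example_fb_location(u64 vram_start, u64 vram_end)
{
	return (((vram_end >> 24) & 0xFFFF) << 16) |
		((vram_start >> 24) & 0xFFFF);
}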
/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB on vi */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* unless the user has overridden it, set the gart
	 * size equal to the larger of 1024 MB or the vram size.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v8_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}
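/*
 * Illustrative sketch (not part of the driver): the GART sizing policy in
 * gmc_v8_0_mc_init() above.  With no module override (amdgpu_gart_size ==
 * -1) the GART covers the larger of 1 GB or the full VRAM size; an
 * override is given in MB and shifted into bytes.  The helper name is
 * hypothetical.
 */
static inline u64 gmc_v8_0_example_gtt_size(int gart_size_mb, u64 vram_size)
{
	if (gart_size_mb == -1)
		return max(1024ULL << 20, vram_size);	/* >= 1 GB */
	return (u64)gart_size_mb << 20;			/* MB -> bytes */
}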
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
/**
 * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}
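/*
 * Illustrative sketch (not part of the driver): how a PTE value is
 * assembled by gmc_v8_0_gart_set_pte_pde() above.  For a page at
 * physical address 0x1234567000 mapped write+read+system+valid (flag bit
 * positions per the PTE format comment, an assumed example):
 *
 *   value = (0x1234567000 & 0x000000FFFFFFF000ULL)	bits 39:12
 *	   | (1 << 6) | (1 << 5) | (1 << 1) | (1 << 0)
 *	   = 0x1234567063
 *
 * The helper below mirrors that masking; the name is hypothetical.
 */
static inline uint64_t gmc_v8_0_example_pte(uint64_t addr, uint32_t flags)
{
	return (addr & 0x000000FFFFFFF000ULL) | flags;
}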
/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}
static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}
/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}
/**
 * gmc_v8_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (VI).
 */
static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}
/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v8_0_vm_init - vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits vi specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}
/**
 * gmc_v8_0_vm_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (VI).
 */
static void gmc_v8_0_vm_fini(struct amdgpu_device *adev)
{
}
/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}
static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}
static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gart_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	return 0;
}
static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for VI is 40 bits.
	 */
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}
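/*
 * Illustrative sketch (not part of the driver): the max_pfn computation
 * in gmc_v8_0_sw_init() above.  amdgpu_vm_size is given in GB, so the
 * number of 4k pages is (size << 30) >> 12 == size << 18; the default of
 * 4 GB therefore yields 4 << 18 == 1 << 20 pages, matching the comment.
 * The helper name is hypothetical.
 */
static inline u64 gmc_v8_0_example_vm_pages(u64 vm_size_gb)
{
	return vm_size_gb << 18;	/* GB -> bytes (<< 30) -> 4k pages (>> 12) */
}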
static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}
static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}
static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}
static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_hw_fini(adev);

	return 0;
}
static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}
static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}
static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
static void gmc_v8_0_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "GMC 8.x registers\n");
	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
		 RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));

	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
	dev_info(adev->dev, "  MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
		 RREG32(mmMC_VM_MX_L1_TLB_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL=0x%08X\n",
		 RREG32(mmVM_L2_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL2=0x%08X\n",
		 RREG32(mmVM_L2_CNTL2));
	dev_info(adev->dev, "  VM_L2_CNTL3=0x%08X\n",
		 RREG32(mmVM_L2_CNTL3));
	dev_info(adev->dev, "  VM_L2_CNTL4=0x%08X\n",
		 RREG32(mmVM_L2_CNTL4));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL));
	dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR));
	dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, "  VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL));
	for (i = 0; i < 16; i++) {
		if (i < 8)
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
		else
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
	}
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
	dev_info(adev->dev, "  MC_VM_FB_LOCATION=0x%08X\n",
		 RREG32(mmMC_VM_FB_LOCATION));
	dev_info(adev->dev, "  MC_VM_AGP_BASE=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BASE));
	dev_info(adev->dev, "  MC_VM_AGP_TOP=0x%08X\n",
		 RREG32(mmMC_VM_AGP_TOP));
	dev_info(adev->dev, "  MC_VM_AGP_BOT=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BOT));

	dev_info(adev->dev, "  HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
		 RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
	dev_info(adev->dev, "  HDP_NONSURFACE_BASE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_BASE));
	dev_info(adev->dev, "  HDP_NONSURFACE_INFO=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_INFO));
	dev_info(adev->dev, "  HDP_NONSURFACE_SIZE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_SIZE));
	dev_info(adev->dev, "  HDP_MISC_CNTL=0x%08X\n",
		 RREG32(mmHDP_MISC_CNTL));
	dev_info(adev->dev, "  HDP_HOST_PATH_CNTL=0x%08X\n",
		 RREG32(mmHDP_HOST_PATH_CNTL));

	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		dev_info(adev->dev, "  %d:\n", i);
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb05 + j, RREG32(0xb05 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb06 + j, RREG32(0xb06 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb07 + j, RREG32(0xb07 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb08 + j, RREG32(0xb08 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb09 + j, RREG32(0xb09 + j));
	}

	dev_info(adev->dev, "  BIF_FB_EN=0x%08X\n",
		 RREG32(mmBIF_FB_EN));
}
static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v8_0_print_status((void *)adev);

		gmc_v8_0_mc_stop(adev, &save);
		if (gmc_v8_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
		}

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v8_0_mc_resume(adev, &save);
		udelay(50);

		gmc_v8_0_print_status((void *)adev);
	}

	return 0;
}
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}
static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		addr);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		status);
	gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);

	return 0;
}
static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}
static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}
static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}
static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.soft_reset = gmc_v8_0_soft_reset,
	.print_status = gmc_v8_0_print_status,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
};
static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
};
static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
}
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}