/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "amdgpu_atomfirmware.h"
#include "imu_v11_0.h"
#include "soc21.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "smuio/smuio_13_0_6_offset.h"
#include "smuio/smuio_13_0_6_sh_mask.h"
#include "navi10_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "soc15.h"
#include "soc15d.h"
#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"

#define GFX11_NUM_GFX_RINGS	1
#define GFX11_MEC_HPD_SIZE	2048

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1	0x1388

#define regCGTT_WD_CLK_CTRL		0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX	1
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1	0x4e7e
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX	1
#define regPC_CONFIG_CNTL_1		0x194d
#define regPC_CONFIG_CNTL_1_BASE_IDX	1

MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin");

static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
};

static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
};

#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))

static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev);
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev);
static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance, int xcc_id);
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val);
static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
				      bool enable);

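/*
 * KIQ (Kernel Interface Queue) PM4 helpers. The KIQ is a privileged
 * compute queue through which the driver asks CP firmware to map,
 * unmap and query the other hardware queues.
 */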
static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

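/*
 * Ask the CP to map a ring's hardware queue: the packet carries the
 * doorbell offset, the GPU address of the MQD describing the queue and
 * the address firmware should read the write pointer from. The me and
 * engine_sel values below follow the convention visible elsewhere in
 * this file (gfx rings use engine select 4, compute 0).
 */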
static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t me = 0, eng_sel = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_COMPUTE:
		me = 1;
		eng_sel = 0;
		break;
	case AMDGPU_RING_TYPE_GFX:
		me = 0;
		eng_sel = 4;
		break;
	case AMDGPU_RING_TYPE_MES:
		me = 2;
		eng_sel = 5;
		break;
	default:
		WARN_ON(1);
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((me)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

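/*
 * Unmap (or preempt) a hardware queue. When MES is enabled and the KIQ
 * ring is not schedulable, the request is routed through the MES
 * legacy-queue path instead of a raw UNMAP_QUEUES packet.
 */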
static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				      uint16_t pasid, uint32_t flush_type,
				      bool all_hub)
{
	gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}

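/* The *_size fields below are in dwords and are used to reserve KIQ ring space. */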
static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx11_kiq_set_resources,
	.kiq_map_queues = gfx11_kiq_map_queues,
	.kiq_unmap_queues = gfx11_kiq_unmap_queues,
	.kiq_query_status = gfx11_kiq_query_status,
	.kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs;
}

static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
		break;
	default:
		break;
	}
	soc15_program_register_sequence(adev,
					golden_settings_gc_11_0,
					(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
}

static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) | /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

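/*
 * Basic ring smoke test: write a magic value to a scratch register
 * through the ring and poll until it reads back as 0xDEADBEEF.
 */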
static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
		gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
	} else {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
		amdgpu_ring_write(ring, scratch -
				  PACKET3_SET_UCONFIG_REG_START);
		amdgpu_ring_write(ring, 0xDEADBEEF);
	}
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}
	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

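/*
 * IB test: submit a small indirect buffer that writes 0xDEADBEEF to a
 * writeback slot (or to MES context memory for MES queues), then wait
 * on the fence and verify the value landed.
 */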
static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_ptr;
	long r;

	/* MES KIQ fw does not support indirect buffers for now */
	if (adev->enable_mes_kiq &&
	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		return 0;

	memset(&ib, 0, sizeof(ib));
	if (ring->is_mes_queue) {
		uint32_t padding, offset;

		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
		padding = amdgpu_mes_ctx_get_offs(ring,
						  AMDGPU_MES_CTX_PADDING_OFFS);
		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
		*cpu_ptr = cpu_to_le32(0xCAFEDEAD);
	} else {
		r = amdgpu_device_wb_get(adev, &index);
		if (r)
			return r;

		gpu_addr = adev->wb.gpu_addr + (index * 4);
		adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
		cpu_ptr = &adev->wb.wb[index];

		r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
		if (r) {
			DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
			goto err1;
		}
	}

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	if (!ring->is_mes_queue)
		amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	if (!ring->is_mes_queue)
		amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
{
	const struct psp_firmware_header_v1_0 *toc_hdr;
	int err = 0;
	char fw_name[40];

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
	if (err)
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	amdgpu_ucode_release(&adev->psp.toc_fw);
	return err;
}

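/*
 * CP gfx shadow (per-context save areas for mid-command-buffer
 * preemption) is only advertised under SR-IOV, and only once the ME,
 * PFP and MEC firmware are new enough to support it.
 */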
static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		if ((adev->gfx.me_fw_version >= 1505) &&
		    (adev->gfx.pfp_fw_version >= 1600) &&
		    (adev->gfx.mec_fw_version >= 512)) {
			if (amdgpu_sriov_vf(adev))
				adev->gfx.cp_gfx_shadow = true;
			else
				adev->gfx.cp_gfx_shadow = false;
		}
		break;
	default:
		adev->gfx.cp_gfx_shadow = false;
		break;
	}
}

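/*
 * Request all GC firmware: PFP, ME, RLC (skipped under SR-IOV), MEC,
 * and optionally the TOC for RLC backdoor autoload. Whether the RS64
 * CP engine is used is decided from the PFP firmware header version.
 */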
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
	char fw_name[40];
	char ucode_prefix[30];
	int err = 0;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
	if (err)
		goto out;
	/* check pfp fw header version to decide whether to enable rs64 for gfx11 */
	adev->gfx.rs64_enable = amdgpu_ucode_hdr_version(
				(union amdgpu_firmware_header *)
				adev->gfx.pfp_fw->data, 2, 0);
	if (adev->gfx.rs64_enable) {
		dev_info(adev->dev, "CP RS64 enable\n");
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
	}

	if (!amdgpu_sriov_vf(adev)) {
		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) &&
		    adev->pdev->revision == 0xCE)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/gc_11_0_0_rlc_1.bin");
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
		if (err)
			goto out;
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
		if (err)
			goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix);

	/* only one MEC for gfx 11.0.0. */
	adev->gfx.mec2_fw = NULL;

	gfx_v11_0_check_fw_cp_gfx_shadow(adev);

	if (adev->gfx.imu.funcs && adev->gfx.imu.funcs->init_microcode) {
		err = adev->gfx.imu.funcs->init_microcode(adev);
		if (err)
			DRM_ERROR("Failed to init imu firmware!\n");
		return err;
	}

out:
	if (err) {
		amdgpu_ucode_release(&adev->gfx.pfp_fw);
		amdgpu_ucode_release(&adev->gfx.me_fw);
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	}

	return err;
}

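/*
 * Size of the clear-state buffer in dwords: preamble begin/end, context
 * control, one SET_CONTEXT_REG run per extent, plus the
 * PA_SC_TILE_STEERING_OVERRIDE write and the final CLEAR_STATE packet.
 */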
static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int ctx_reg_offset;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}

static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx11_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);

	return 0;
}

static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}

static void gfx_v11_0_me_init(struct amdgpu_device *adev)
{
	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);
}

static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE;

	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v11_0_mec_fini(adev);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}

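/*
 * Wave debug helpers: SQ registers are read indirectly by programming
 * SQ_IND_INDEX with the wave (and optionally thread) ID and reading the
 * result back from SQ_IND_DATA.
 */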
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* in gfx11 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here */
	WARN_ON(simd != 0);

	/* type 3 wave data */
	dst[(*no_fields)++] = 3;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
}

static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc21_grbm_select(adev, me, pipe, q, vm);
}

/* all sizes are in bytes */
#define MQD_SHADOW_BASE_SIZE      73728
#define MQD_SHADOW_BASE_ALIGNMENT 256
#define MQD_FWWORKAREA_SIZE       484
#define MQD_FWWORKAREA_ALIGNMENT  256

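/*
 * Report the shadow and CSA (firmware work area) sizes callers must
 * allocate for gfx preemption; zeroed out when the CP firmware does not
 * support gfx shadowing.
 */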
static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
					 struct amdgpu_gfx_shadow_info *shadow_info)
{
	if (adev->gfx.cp_gfx_shadow) {
		shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
		shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
		shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
		shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
		return 0;
	} else {
		memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
		return -ENOTSUPP;
	}
}

static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v11_0_select_se_sh,
	.read_wave_data = &gfx_v11_0_read_wave_data,
	.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
	.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
	.get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
};

static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 3):
		adev->gfx.ras = &gfx_v11_0_3_ras;
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300;
		break;
	default:
		BUG();
		break;
	}

	return 0;
}

static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	int r;
	struct amdgpu_ring *ring;
	unsigned int irq_type;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;
	return 0;
}

static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
					int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX11_MEC_HPD_SIZE);
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;
}

static struct {
	SOC21_FIRMWARE_ID	id;
	unsigned int		offset;
	unsigned int		size;
} rlc_autoload_info[SOC21_FIRMWARE_ID_MAX];

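/*
 * The RLC TOC stores per-firmware offsets/sizes in dwords; cache them
 * in bytes so later copies can index the autoload buffer directly.
 */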
static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
{
	RLC_TABLE_OF_CONTENT *ucode = rlc_toc;

	while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) &&
	       (ucode->id < SOC21_FIRMWARE_ID_MAX)) {
		rlc_autoload_info[ucode->id].id = ucode->id;
		rlc_autoload_info[ucode->id].offset = ucode->offset * 4;
		rlc_autoload_info[ucode->id].size = ucode->size * 4;

		ucode++;
	}
}

static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	SOC21_FIRMWARE_ID id;

	gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);

	for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* In case the offset in rlc toc ucode is aligned */
	if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset)
		total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset +
			     rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].size;

	return total_size;
}

static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v11_0_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}

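/*
 * Copy one firmware image into its TOC-assigned slot in the autoload
 * buffer, truncating or zero-padding to the slot size, and mark it in
 * the autoload mask handed to the RLC.
 */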
static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
						       SOC21_FIRMWARE_ID id,
						       const void *fw_data,
						       uint32_t fw_size,
						       uint32_t *fw_autoload_mask)
{
	uint32_t toc_offset;
	uint32_t toc_fw_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;

	if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;

	if (fw_size == 0)
		fw_size = toc_fw_size;

	if (fw_size > toc_fw_size)
		fw_size = toc_fw_size;

	memcpy(ptr + toc_offset, fw_data, fw_size);

	if (fw_size < toc_fw_size)
		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);

	if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
		*(uint64_t *)fw_autoload_mask |= 1ULL << id;
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	void *data;
	uint32_t size;
	uint64_t *toc_ptr;

	*(uint64_t *)fw_autoload_mask |= 0x1;

	DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask);

	data = adev->psp.toc.start_addr;
	size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size;

	toc_ptr = (uint64_t *)data + size / 8 - 1;
	*toc_ptr = *(uint64_t *)fw_autoload_mask;

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC,
						   data, size, fw_autoload_mask);
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
	uint16_t version_major, version_minor;

	if (adev->gfx.rs64_enable) {
		/* pfp ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP,
							   fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		/* me ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME,
							   fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		/* mec ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		/* instruction */
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC,
							   fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK,
							   fw_data, fw_size, fw_autoload_mask);
	} else {
		/* pfp ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.pfp_fw->data;
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP,
							   fw_data, fw_size, fw_autoload_mask);

		/* me ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.me_fw->data;
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME,
							   fw_data, fw_size, fw_autoload_mask);

		/* mec ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			le32_to_cpu(cp_hdr->jt_size) * 4;
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC,
							   fw_data, fw_size, fw_autoload_mask);
	}

	/* rlc ucode */
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
		adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE,
						   fw_data, fw_size, fw_autoload_mask);

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2) {
		if (version_minor >= 2) {
			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
			/* rlc iram ucode */
			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
				le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE,
								   fw_data, fw_size, fw_autoload_mask);
			/* rlc dram ucode */
			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
				le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT,
								   fw_data, fw_size, fw_autoload_mask);
		}
	}
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev,
							    uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v2_0 *sdma_hdr;

	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
		adev->sdma.instance[0].fw->data;
	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask);

	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->ctl_ucode_offset));
	fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, fw_autoload_mask);
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	unsigned fw_size;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	int pipe, ucode_id, data_id;

	for (pipe = 0; pipe < 2; pipe++) {
		if (pipe == 0) {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0;
			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK;
		} else {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1;
			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK;
		}

		mes_hdr = (const struct mes_firmware_header_v1_0 *)
			adev->mes.fw[pipe]->data;

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				ucode_id, fw_data, fw_size, fw_autoload_mask);

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				data_id, fw_data, fw_size, fw_autoload_mask);
	}
}

static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
{
	uint32_t rlc_g_offset, rlc_g_size;
	uint64_t gpu_addr;
	uint32_t autoload_fw_id[2];

	memset(autoload_fw_id, 0, sizeof(uint32_t) * 2);

	/* RLC autoload sequence 2: copy ucode */
	gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id);

	rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);

	/* RLC autoload sequence 3: load IMU fw */
	if (adev->gfx.imu.funcs->load_microcode)
		adev->gfx.imu.funcs->load_microcode(adev);
	/* RLC autoload sequence 4: init IMU fw */
	if (adev->gfx.imu.funcs->setup_imu)
		adev->gfx.imu.funcs->setup_imu(adev);
	if (adev->gfx.imu.funcs->start_imu)
		adev->gfx.imu.funcs->start_imu(adev);

	/* RLC autoload sequence 5: disable gpa mode */
	gfx_v11_0_disable_gpa_mode(adev);

	return 0;
}

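/*
 * Per-IP-version ME/MEC topology is configured here; the counts below
 * drive how many gfx and compute rings sw_init creates.
 */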
static int gfx_v11_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id = 0;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	default:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	}

	/* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3) &&
	    amdgpu_sriov_is_pp_one_vf(adev))
		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	/* FED error */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
			      &adev->gfx.rlc_gc_fed_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v11_0_me_init(adev);

	r = gfx_v11_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v11_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v11_0_gfx_ring_init(adev, ring_id,
							    i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}

	ring_id = 0;
	/* set up the compute queues - allocate horizontally across pipes */
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
								     k, j))
					continue;

				r = gfx_v11_0_compute_ring_init(adev, ring_id,
								i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}

	if (!adev->enable_mes_kiq) {
		r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		kiq = &adev->gfx.kiq[0];
		r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
		if (r)
			return r;
	}

	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd), 0);
	if (r)
		return r;

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v11_0_rlc_autoload_buffer_init(adev);
		if (r)
			return r;
	}

	r = gfx_v11_0_gpu_early_init(adev);
	if (r)
		return r;

	if (amdgpu_gfx_ras_sw_init(adev)) {
		dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
		return -EINVAL;
	}

	return 0;
}

static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
			      &adev->gfx.pfp.pfp_fw_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_ptr);

	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj,
			      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
}

static void gfx_v11_0_me_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
			      &adev->gfx.me.me_fw_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_ptr);

	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj,
			      &adev->gfx.me.me_fw_data_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_data_ptr);
}

static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
			      &adev->gfx.rlc.rlc_autoload_gpu_addr,
			      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
}

static int gfx_v11_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_mqd_sw_fini(adev, 0);

	if (!adev->enable_mes_kiq) {
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
		amdgpu_gfx_kiq_fini(adev, 0);
	}

	gfx_v11_0_pfp_fini(adev);
	gfx_v11_0_me_fini(adev);
	gfx_v11_0_rlc_fini(adev);
	gfx_v11_0_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v11_0_rlc_autoload_buffer_fini(adev);

	gfx_v11_0_free_microcode(adev);

	return 0;
}

static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
}

static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev)
{
	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;

	gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE);
	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
					    CC_GC_SA_UNIT_DISABLE,
					    SA_DISABLE);
	gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE);
	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
						 GC_USER_SA_UNIT_DISABLE,
						 SA_DISABLE);
	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
					    adev->gfx.config.max_shader_engines);

	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
}

static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
	u32 rb_mask;

	gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
					    CC_RB_BACKEND_DISABLE,
					    BACKEND_DISABLE);
	gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
						 GC_USER_RB_BACKEND_DISABLE,
						 BACKEND_DISABLE);
	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
					    adev->gfx.config.max_shader_engines);

	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
}

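/*
 * Derive the active render-backend mask: each active SA contributes its
 * RBs, then the RB mask read back from the hardware is folded in.
 */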
static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
{
	u32 rb_bitmap_width_per_sa;
	u32 max_sa;
	u32 active_sa_bitmap;
	u32 global_active_rb_bitmap;
	u32 active_rb_bitmap = 0;
	u32 i;

	/* query sa bitmap from SA_UNIT_DISABLE registers */
	active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev);
	/* query rb bitmap from RB_BACKEND_DISABLE registers */
	global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev);

	/* generate active rb bitmap according to active sa bitmap */
	max_sa = adev->gfx.config.max_shader_engines *
		 adev->gfx.config.max_sh_per_se;
	rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
				 adev->gfx.config.max_sh_per_se;
	for (i = 0; i < max_sa; i++) {
		if (active_sa_bitmap & (1 << i))
			active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
	}

	active_rb_bitmap |= global_active_rb_bitmap;
	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
#define LDS_APP_BASE		0x1
#define SCRATCH_APP_BASE	0x2

static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
		       SCRATCH_APP_BASE;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc21_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data);
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs. */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0);
	}
}

static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < 16; vmid++) {
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0);
	}
}

static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev)
{
	/* TODO: harvest feature to be added later. */
}

static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev)
{
	/* TCCs are global (not instanced). */
	uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) |
			       RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE);

	adev->gfx.config.tcc_disabled_mask =
		REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
		(REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
}

static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	if (!amdgpu_sriov_vf(adev))
		WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v11_0_setup_rb(adev);
	gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info);
	gfx_v11_0_get_tcc_info(adev);
	adev->gfx.config.pa_sc_tile_steering_override = 0;

	/* Set whether texture coordinate truncation is conformant. */
	tmp = RREG32_SOC15(GC, 0, regTA_CNTL2);
	adev->gfx.config.ta_cntl2_truncate_coord_mode =
		REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc21_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		if (i != 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
				(adev->gmc.private_aperture_start >> 48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
				(adev->gmc.shared_aperture_start >> 48));
			WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp);
		}
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v11_0_init_compute_vmid(adev);
	gfx_v11_0_init_gds_vmid(adev);
}

static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	u32 tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
			    enable ? 1 : 0);

	WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
}

static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);

	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI,
			adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO,
			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);

	return 0;
}

static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);

	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
	WREG32_SOC15(GC, 0, regRLC_CNTL, tmp);
}

static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}

1808 static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
1811 uint32_t rlc_pg_cntl;
1813 rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
1816 /* RLC_PG_CNTL[23] = 0 (default)
1817 * RLC will wait for handshake acks with SMU
1818 * GFXOFF will be enabled
1819 * RLC_PG_CNTL[23] = 1
1820 * RLC will not issue any message to SMU
1821 * hence no handshake between SMU & RLC
1822 * GFXOFF will be disabled
1824 rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
1826 rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
1827 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl);
1830 static void gfx_v11_0_rlc_start(struct amdgpu_device *adev)
/* TODO: re-enable the RLC & SMU handshake once SMU and the
 * GFXOFF feature work as expected. */
1834 if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
1835 gfx_v11_0_rlc_smu_handshake_cntl(adev, false);
1837 WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
1841 static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev)
1845 /* enable Save Restore Machine */
1846 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL));
1847 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1848 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1849 WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp);
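/* Legacy (non-PSP) RLCG load: stream the image through the
 * GPM_UCODE_ADDR/DATA pair, starting at the fixed boot offset. */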
1852 static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev)
1854 const struct rlc_firmware_header_v2_0 *hdr;
1855 const __le32 *fw_data;
1856 unsigned i, fw_size;
1858 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1859 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1860 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1861 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1863 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
1864 RLCG_UCODE_LOADING_START_ADDRESS);
1866 for (i = 0; i < fw_size; i++)
1867 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA,
1868 le32_to_cpup(fw_data++));
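/* By long-standing convention the ADDR register is left holding the
 * firmware version once the load is finished. */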
1870 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
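/* RLC v2.2 headers carry separate IRAM and DRAM images for the LX6
 * core; load both, then release LX6 from reset (BRESET = 0). */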
1873 static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev)
1875 const struct rlc_firmware_header_v2_2 *hdr;
1876 const __le32 *fw_data;
1877 unsigned i, fw_size;
1880 hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1882 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1883 le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
1884 fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;
1886 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0);
1888 for (i = 0; i < fw_size; i++) {
if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
	msleep(1); /* delay 1 ms to avoid hogging the bus in emulation */
1891 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA,
1892 le32_to_cpup(fw_data++));
1895 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1897 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1898 le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
1899 fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;
1901 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0);
1902 for (i = 0; i < fw_size; i++) {
if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
	msleep(1); /* delay 1 ms to avoid hogging the bus in emulation */
1905 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA,
1906 le32_to_cpup(fw_data++));
1909 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1911 tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL);
1912 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
1913 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
1914 WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp);
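/* RLC v2.3 headers add RLCP (pace) and RLCV (GPU IOV) images; load
 * each one and enable its execution unit. */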
1917 static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev)
1919 const struct rlc_firmware_header_v2_3 *hdr;
1920 const __le32 *fw_data;
1921 unsigned i, fw_size;
1924 hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
1926 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1927 le32_to_cpu(hdr->rlcp_ucode_offset_bytes));
1928 fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4;
1930 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0);
1932 for (i = 0; i < fw_size; i++) {
if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
	msleep(1); /* delay 1 ms to avoid hogging the bus in emulation */
1935 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA,
1936 le32_to_cpup(fw_data++));
1939 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version);
1941 tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE);
1942 tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
1943 WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp);
1945 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1946 le32_to_cpu(hdr->rlcv_ucode_offset_bytes));
1947 fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4;
1949 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0);
1951 for (i = 0; i < fw_size; i++) {
if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
	msleep(1); /* delay 1 ms to avoid hogging the bus in emulation */
1954 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA,
1955 le32_to_cpup(fw_data++));
1958 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version);
1960 tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL);
1961 tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1);
1962 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp);
1965 static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev)
1967 const struct rlc_firmware_header_v2_0 *hdr;
1968 uint16_t version_major;
1969 uint16_t version_minor;
1971 if (!adev->gfx.rlc_fw)
1974 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1975 amdgpu_ucode_print_rlc_hdr(&hdr->header);
1977 version_major = le16_to_cpu(hdr->header.header_version_major);
1978 version_minor = le16_to_cpu(hdr->header.header_version_minor);
1980 if (version_major == 2) {
1981 gfx_v11_0_load_rlcg_microcode(adev);
1982 if (amdgpu_dpm == 1) {
1983 if (version_minor >= 2)
1984 gfx_v11_0_load_rlc_iram_dram_microcode(adev);
1985 if (version_minor == 3)
1986 gfx_v11_0_load_rlcp_rlcv_microcode(adev);
1995 static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev)
1999 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2000 gfx_v11_0_init_csb(adev);
2002 if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
		gfx_v11_0_rlc_enable_srm(adev);
} else {
	if (amdgpu_sriov_vf(adev)) {
		gfx_v11_0_init_csb(adev);
		return 0;
	}

	adev->gfx.rlc.funcs->stop(adev);
/* disable CG */
WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);

/* disable PG */
WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0);
2018 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2019 /* legacy rlc firmware loading */
2020 r = gfx_v11_0_rlc_load_microcode(adev);
2025 gfx_v11_0_init_csb(adev);
2027 adev->gfx.rlc.funcs->start(adev);
2032 static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
2034 uint32_t usec_timeout = 50000; /* wait for 50ms */
2038 /* Trigger an invalidation of the L1 instruction caches */
2039 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2040 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2041 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2043 /* Wait for invalidation complete */
2044 for (i = 0; i < usec_timeout; i++) {
2045 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2046 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2047 INVALIDATE_CACHE_COMPLETE))
2052 if (i >= usec_timeout) {
2053 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2057 if (amdgpu_emu_mode == 1)
2058 adev->hdp.funcs->flush_hdp(adev, NULL);
2060 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2061 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2062 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2063 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2064 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2065 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
/* Program the me ucode address into the instruction cache address register */
2068 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2069 lower_32_bits(addr) & 0xFFFFF000);
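/* The low 12 bits are masked off above: the ucode image is expected
 * to be at least 4 KiB aligned. */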
2070 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2071 upper_32_bits(addr));
2076 static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
2078 uint32_t usec_timeout = 50000; /* wait for 50ms */
2082 /* Trigger an invalidation of the L1 instruction caches */
2083 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2084 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2085 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2087 /* Wait for invalidation complete */
2088 for (i = 0; i < usec_timeout; i++) {
2089 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2090 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2091 INVALIDATE_CACHE_COMPLETE))
2096 if (i >= usec_timeout) {
2097 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2101 if (amdgpu_emu_mode == 1)
2102 adev->hdp.funcs->flush_hdp(adev, NULL);
2104 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2105 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2106 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2107 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2108 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2109 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
/* Program the pfp ucode address into the instruction cache address register */
2112 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2113 lower_32_bits(addr) & 0xFFFFF000);
2114 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2115 upper_32_bits(addr));
2120 static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
2122 uint32_t usec_timeout = 50000; /* wait for 50ms */
2126 /* Trigger an invalidation of the L1 instruction caches */
2127 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2128 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2130 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2132 /* Wait for invalidation complete */
2133 for (i = 0; i < usec_timeout; i++) {
2134 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2135 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2136 INVALIDATE_CACHE_COMPLETE))
2141 if (i >= usec_timeout) {
2142 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2146 if (amdgpu_emu_mode == 1)
2147 adev->hdp.funcs->flush_hdp(adev, NULL);
2149 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2150 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2151 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2152 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2153 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
/* Program the mec1 ucode address into the instruction cache address register */
2156 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO,
2157 lower_32_bits(addr) & 0xFFFFF000);
2158 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2159 upper_32_bits(addr));
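/* RS64 variant: besides the instruction cache, each front-end core
 * also gets a data cache pointed at its stack buffer (addr2), and the
 * per-pipe program counter must be seeded before the pipe is reset. */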
2164 static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2166 uint32_t usec_timeout = 50000; /* wait for 50ms */
2168 unsigned i, pipe_id;
2169 const struct gfx_firmware_header_v2_0 *pfp_hdr;
2171 pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2172 adev->gfx.pfp_fw->data;
2174 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2175 lower_32_bits(addr));
2176 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2177 upper_32_bits(addr));
2179 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2180 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2181 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2182 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2183 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
/*
 * Programming any of the CP_PFP_IC_BASE registers
 * forces an invalidation of the PFP L1 I$. Wait for the
 * invalidation to complete.
 */
2190 for (i = 0; i < usec_timeout; i++) {
2191 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2192 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2193 INVALIDATE_CACHE_COMPLETE))
2198 if (i >= usec_timeout) {
2199 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2203 /* Prime the L1 instruction caches */
2204 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2205 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
2206 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
/* Wait for the cache priming to complete */
for (i = 0; i < usec_timeout; i++) {
	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
	if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
			       ICACHE_PRIMED))
		break;
	udelay(1);
}
2216 if (i >= usec_timeout) {
2217 dev_err(adev->dev, "failed to prime instruction cache\n");
2221 mutex_lock(&adev->srbm_mutex);
2222 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2223 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2224 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2225 (pfp_hdr->ucode_start_addr_hi << 30) |
2226 (pfp_hdr->ucode_start_addr_lo >> 2));
2227 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2228 pfp_hdr->ucode_start_addr_hi >> 2);
/*
 * Program CP_ME_CNTL to reset the given pipe so that
 * CP_PFP_PRGRM_CNTR_START takes effect.
 */
tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
if (pipe_id == 0)
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
			    PFP_PIPE0_RESET, 1);
else
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
			    PFP_PIPE1_RESET, 1);
WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

/* Clear the pfp pipe reset bit again. */
if (pipe_id == 0)
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
			    PFP_PIPE0_RESET, 0);
else
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
			    PFP_PIPE1_RESET, 0);
WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2252 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
2253 lower_32_bits(addr2));
2254 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
2255 upper_32_bits(addr2));
2257 soc21_grbm_select(adev, 0, 0, 0, 0);
2258 mutex_unlock(&adev->srbm_mutex);
2260 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2261 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2262 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2263 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2265 /* Invalidate the data caches */
2266 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2267 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2268 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2270 for (i = 0; i < usec_timeout; i++) {
2271 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2272 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2273 INVALIDATE_DCACHE_COMPLETE))
2278 if (i >= usec_timeout) {
2279 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2286 static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2288 uint32_t usec_timeout = 50000; /* wait for 50ms */
2290 unsigned i, pipe_id;
2291 const struct gfx_firmware_header_v2_0 *me_hdr;
2293 me_hdr = (const struct gfx_firmware_header_v2_0 *)
2294 adev->gfx.me_fw->data;
2296 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2297 lower_32_bits(addr));
2298 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2299 upper_32_bits(addr));
2301 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2302 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2303 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2304 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2305 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
/*
 * Programming any of the CP_ME_IC_BASE registers
 * forces an invalidation of the ME L1 I$. Wait for the
 * invalidation to complete.
 */
2312 for (i = 0; i < usec_timeout; i++) {
2313 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2314 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2315 INVALIDATE_CACHE_COMPLETE))
2320 if (i >= usec_timeout) {
2321 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2325 /* Prime the instruction caches */
2326 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2327 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
2328 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
/* Wait for the instruction cache priming to complete */
for (i = 0; i < usec_timeout; i++) {
	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
	if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
			       ICACHE_PRIMED))
		break;
	udelay(1);
}
2339 if (i >= usec_timeout) {
2340 dev_err(adev->dev, "failed to prime instruction cache\n");
2344 mutex_lock(&adev->srbm_mutex);
2345 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2346 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2347 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2348 (me_hdr->ucode_start_addr_hi << 30) |
	     (me_hdr->ucode_start_addr_lo >> 2));
WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
	     me_hdr->ucode_start_addr_hi >> 2);
/*
 * Program CP_ME_CNTL to reset the given pipe so that
 * CP_ME_PRGRM_CNTR_START takes effect.
 */
tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
if (pipe_id == 0)
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
			    ME_PIPE0_RESET, 1);
else
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
			    ME_PIPE1_RESET, 1);
WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

/* Clear the me pipe reset bit again. */
if (pipe_id == 0)
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
			    ME_PIPE0_RESET, 0);
else
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
			    ME_PIPE1_RESET, 0);
WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2375 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
2376 lower_32_bits(addr2));
2377 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
2378 upper_32_bits(addr2));
2380 soc21_grbm_select(adev, 0, 0, 0, 0);
2381 mutex_unlock(&adev->srbm_mutex);
2383 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2384 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2385 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2386 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2388 /* Invalidate the data caches */
2389 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2390 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2391 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2393 for (i = 0; i < usec_timeout; i++) {
2394 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2395 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2396 INVALIDATE_DCACHE_COMPLETE))
2401 if (i >= usec_timeout) {
2402 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2409 static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2411 uint32_t usec_timeout = 50000; /* wait for 50ms */
2414 const struct gfx_firmware_header_v2_0 *mec_hdr;
2416 mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2417 adev->gfx.mec_fw->data;
2419 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2420 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2421 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2422 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2423 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2425 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
2426 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
2427 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
2428 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
2430 mutex_lock(&adev->srbm_mutex);
2431 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2432 soc21_grbm_select(adev, 1, i, 0, 0);
2434 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, addr2);
2435 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
2436 upper_32_bits(addr2));
2438 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2439 mec_hdr->ucode_start_addr_lo >> 2 |
2440 mec_hdr->ucode_start_addr_hi << 30);
2441 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2442 mec_hdr->ucode_start_addr_hi >> 2);
2444 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, addr);
2445 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2446 upper_32_bits(addr));
2448 mutex_unlock(&adev->srbm_mutex);
2449 soc21_grbm_select(adev, 0, 0, 0, 0);
/* Trigger an invalidation of the MEC data cache */
2452 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2453 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2454 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
2456 /* Wait for invalidation complete */
2457 for (i = 0; i < usec_timeout; i++) {
2458 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2459 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
2460 INVALIDATE_DCACHE_COMPLETE))
2465 if (i >= usec_timeout) {
2466 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2470 /* Trigger an invalidation of the L1 instruction caches */
2471 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2472 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2473 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2475 /* Wait for invalidation complete */
2476 for (i = 0; i < usec_timeout; i++) {
2477 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2478 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2479 INVALIDATE_CACHE_COMPLETE))
2484 if (i >= usec_timeout) {
2485 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2492 static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev)
2494 const struct gfx_firmware_header_v2_0 *pfp_hdr;
2495 const struct gfx_firmware_header_v2_0 *me_hdr;
2496 const struct gfx_firmware_header_v2_0 *mec_hdr;
2497 uint32_t pipe_id, tmp;
2499 mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2500 adev->gfx.mec_fw->data;
2501 me_hdr = (const struct gfx_firmware_header_v2_0 *)
2502 adev->gfx.me_fw->data;
2503 pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2504 adev->gfx.pfp_fw->data;
2506 /* config pfp program start addr */
2507 for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2508 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2509 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2510 (pfp_hdr->ucode_start_addr_hi << 30) |
2511 (pfp_hdr->ucode_start_addr_lo >> 2));
2512 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2513 pfp_hdr->ucode_start_addr_hi >> 2);
2515 soc21_grbm_select(adev, 0, 0, 0, 0);
2517 /* reset pfp pipe */
2518 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2519 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1);
2520 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1);
2521 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2523 /* clear pfp pipe reset */
2524 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0);
2525 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0);
2526 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2528 /* config me program start addr */
2529 for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2530 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2531 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2532 (me_hdr->ucode_start_addr_hi << 30) |
	     (me_hdr->ucode_start_addr_lo >> 2));
WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
	     me_hdr->ucode_start_addr_hi >> 2);
2537 soc21_grbm_select(adev, 0, 0, 0, 0);
/* reset me pipe */
tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2541 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1);
2542 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1);
2543 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2545 /* clear me pipe reset */
2546 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0);
2547 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0);
2548 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2550 /* config mec program start addr */
2551 for (pipe_id = 0; pipe_id < 4; pipe_id++) {
2552 soc21_grbm_select(adev, 1, pipe_id, 0, 0);
2553 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2554 mec_hdr->ucode_start_addr_lo >> 2 |
2555 mec_hdr->ucode_start_addr_hi << 30);
2556 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2557 mec_hdr->ucode_start_addr_hi >> 2);
2559 soc21_grbm_select(adev, 0, 0, 0, 0);
2561 /* reset mec pipe */
2562 tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
2563 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
2564 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
2565 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
2566 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
2567 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
2569 /* clear mec pipe reset */
2570 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
2571 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
2572 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
2573 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
2574 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
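/* Poll until the RLC reports BOOTLOAD_COMPLETE and the CP is idle
 * (the APU variants 11.0.1/11.0.4/11.5.0 expose the bootload status
 * at a relocated register offset), then, on the backdoor-autoload
 * path, point each CP engine's caches at the images the RLC placed
 * in the autoload buffer. */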
2577 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
2580 uint32_t bootload_status;
2582 uint64_t addr, addr2;
2584 for (i = 0; i < adev->usec_timeout; i++) {
2585 cp_status = RREG32_SOC15(GC, 0, regCP_STAT);
2587 if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
2588 IP_VERSION(11, 0, 1) ||
2589 amdgpu_ip_version(adev, GC_HWIP, 0) ==
2590 IP_VERSION(11, 0, 4) ||
2591 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0))
	bootload_status = RREG32_SOC15(GC, 0,
			regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
else
	bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS);
2597 if ((cp_status == 0) &&
2598 (REG_GET_FIELD(bootload_status,
2599 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
2605 if (i >= adev->usec_timeout) {
2606 dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
2610 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2611 if (adev->gfx.rs64_enable) {
2612 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2613 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset;
2614 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2615 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset;
2616 r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2);
2619 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2620 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset;
2621 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2622 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset;
2623 r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2);
2626 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2627 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset;
2628 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2629 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset;
2630 r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2);
2634 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2635 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset;
2636 r = gfx_v11_0_config_me_cache(adev, addr);
2639 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2640 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset;
2641 r = gfx_v11_0_config_pfp_cache(adev, addr);
2644 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2645 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset;
2646 r = gfx_v11_0_config_mec_cache(adev, addr);
2655 static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2658 u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2660 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2661 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2662 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2664 for (i = 0; i < adev->usec_timeout; i++) {
2665 if (RREG32_SOC15(GC, 0, regCP_STAT) == 0)
2670 if (i >= adev->usec_timeout)
2671 DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
2676 static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
2679 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2680 const __le32 *fw_data;
2681 unsigned i, fw_size;
2683 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2684 adev->gfx.pfp_fw->data;
2686 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2688 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2689 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2690 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);
2692 r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
2693 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2694 &adev->gfx.pfp.pfp_fw_obj,
2695 &adev->gfx.pfp.pfp_fw_gpu_addr,
2696 (void **)&adev->gfx.pfp.pfp_fw_ptr);
2698 dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
2699 gfx_v11_0_pfp_fini(adev);
2703 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);
2705 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
2706 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
2708 gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr);
2710 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0);
2712 for (i = 0; i < pfp_hdr->jt_size; i++)
2713 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA,
2714 le32_to_cpup(fw_data + pfp_hdr->jt_offset + i));
2716 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2721 static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
2724 const struct gfx_firmware_header_v2_0 *pfp_hdr;
2725 const __le32 *fw_ucode, *fw_data;
2726 unsigned i, pipe_id, fw_ucode_size, fw_data_size;
2728 uint32_t usec_timeout = 50000; /* wait for 50ms */
2730 pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2731 adev->gfx.pfp_fw->data;
2733 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2736 fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data +
2737 le32_to_cpu(pfp_hdr->ucode_offset_bytes));
2738 fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes);
2740 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2741 le32_to_cpu(pfp_hdr->data_offset_bytes));
2742 fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes);
2745 r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
2747 AMDGPU_GEM_DOMAIN_VRAM |
2748 AMDGPU_GEM_DOMAIN_GTT,
2749 &adev->gfx.pfp.pfp_fw_obj,
2750 &adev->gfx.pfp.pfp_fw_gpu_addr,
2751 (void **)&adev->gfx.pfp.pfp_fw_ptr);
2753 dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r);
2754 gfx_v11_0_pfp_fini(adev);
2758 r = amdgpu_bo_create_reserved(adev, fw_data_size,
2760 AMDGPU_GEM_DOMAIN_VRAM |
2761 AMDGPU_GEM_DOMAIN_GTT,
2762 &adev->gfx.pfp.pfp_fw_data_obj,
2763 &adev->gfx.pfp.pfp_fw_data_gpu_addr,
2764 (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
2766 dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r);
2767 gfx_v11_0_pfp_fini(adev);
2771 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size);
2772 memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size);
2774 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
2775 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj);
2776 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
2777 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
2779 if (amdgpu_emu_mode == 1)
2780 adev->hdp.funcs->flush_hdp(adev, NULL);
2782 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2783 lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
2784 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2785 upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
2787 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2788 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2789 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2790 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2791 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
/*
 * Programming any of the CP_PFP_IC_BASE registers
 * forces an invalidation of the PFP L1 I$. Wait for the
 * invalidation to complete.
 */
2798 for (i = 0; i < usec_timeout; i++) {
2799 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2800 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2801 INVALIDATE_CACHE_COMPLETE))
2806 if (i >= usec_timeout) {
2807 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2811 /* Prime the L1 instruction caches */
2812 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2813 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
2814 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
/* Wait for the cache priming to complete */
for (i = 0; i < usec_timeout; i++) {
	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
	if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
			       ICACHE_PRIMED))
		break;
	udelay(1);
}
2824 if (i >= usec_timeout) {
2825 dev_err(adev->dev, "failed to prime instruction cache\n");
2829 mutex_lock(&adev->srbm_mutex);
2830 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2831 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2832 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2833 (pfp_hdr->ucode_start_addr_hi << 30) |
	     (pfp_hdr->ucode_start_addr_lo >> 2));
WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
	     pfp_hdr->ucode_start_addr_hi >> 2);
/*
 * Program CP_ME_CNTL to reset the given pipe so that
 * CP_PFP_PRGRM_CNTR_START takes effect.
 */
tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
if (pipe_id == 0)
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
			    PFP_PIPE0_RESET, 1);
else
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
			    PFP_PIPE1_RESET, 1);
WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

/* Clear the pfp pipe reset bit again. */
if (pipe_id == 0)
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
			    PFP_PIPE0_RESET, 0);
else
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
			    PFP_PIPE1_RESET, 0);
WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2860 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
2861 lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
2862 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
2863 upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
2865 soc21_grbm_select(adev, 0, 0, 0, 0);
2866 mutex_unlock(&adev->srbm_mutex);
2868 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2869 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2870 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2871 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2873 /* Invalidate the data caches */
2874 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2875 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2876 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2878 for (i = 0; i < usec_timeout; i++) {
2879 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2880 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2881 INVALIDATE_DCACHE_COMPLETE))
2886 if (i >= usec_timeout) {
2887 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2894 static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
2897 const struct gfx_firmware_header_v1_0 *me_hdr;
2898 const __le32 *fw_data;
2899 unsigned i, fw_size;
2901 me_hdr = (const struct gfx_firmware_header_v1_0 *)
2902 adev->gfx.me_fw->data;
2904 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2906 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2907 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2908 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);
2910 r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
2911 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2912 &adev->gfx.me.me_fw_obj,
2913 &adev->gfx.me.me_fw_gpu_addr,
2914 (void **)&adev->gfx.me.me_fw_ptr);
2916 dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
2917 gfx_v11_0_me_fini(adev);
2921 memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);
2923 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
2924 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
2926 gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr);
2928 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0);
2930 for (i = 0; i < me_hdr->jt_size; i++)
2931 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA,
2932 le32_to_cpup(fw_data + me_hdr->jt_offset + i));
2934 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version);
2939 static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
2942 const struct gfx_firmware_header_v2_0 *me_hdr;
2943 const __le32 *fw_ucode, *fw_data;
2944 unsigned i, pipe_id, fw_ucode_size, fw_data_size;
2946 uint32_t usec_timeout = 50000; /* wait for 50ms */
2948 me_hdr = (const struct gfx_firmware_header_v2_0 *)
2949 adev->gfx.me_fw->data;
2951 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2954 fw_ucode = (const __le32 *)(adev->gfx.me_fw->data +
2955 le32_to_cpu(me_hdr->ucode_offset_bytes));
2956 fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes);
2958 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2959 le32_to_cpu(me_hdr->data_offset_bytes));
2960 fw_data_size = le32_to_cpu(me_hdr->data_size_bytes);
2963 r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
2965 AMDGPU_GEM_DOMAIN_VRAM |
2966 AMDGPU_GEM_DOMAIN_GTT,
2967 &adev->gfx.me.me_fw_obj,
2968 &adev->gfx.me.me_fw_gpu_addr,
2969 (void **)&adev->gfx.me.me_fw_ptr);
2971 dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r);
2972 gfx_v11_0_me_fini(adev);
2976 r = amdgpu_bo_create_reserved(adev, fw_data_size,
2978 AMDGPU_GEM_DOMAIN_VRAM |
2979 AMDGPU_GEM_DOMAIN_GTT,
2980 &adev->gfx.me.me_fw_data_obj,
2981 &adev->gfx.me.me_fw_data_gpu_addr,
2982 (void **)&adev->gfx.me.me_fw_data_ptr);
2984 dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
2985 gfx_v11_0_pfp_fini(adev);
2989 memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size);
2990 memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size);
2992 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
2993 amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj);
2994 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
2995 amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
2997 if (amdgpu_emu_mode == 1)
2998 adev->hdp.funcs->flush_hdp(adev, NULL);
3000 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
3001 lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
3002 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
3003 upper_32_bits(adev->gfx.me.me_fw_gpu_addr));
3005 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
3006 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
3007 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
3008 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
3009 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
/*
 * Programming any of the CP_ME_IC_BASE registers
 * forces an invalidation of the ME L1 I$. Wait for the
 * invalidation to complete.
 */
3016 for (i = 0; i < usec_timeout; i++) {
3017 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3018 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
3019 INVALIDATE_CACHE_COMPLETE))
3024 if (i >= usec_timeout) {
3025 dev_err(adev->dev, "failed to invalidate instruction cache\n");
3029 /* Prime the instruction caches */
3030 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3031 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
3032 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
/* Wait for the instruction cache priming to complete */
for (i = 0; i < usec_timeout; i++) {
	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
	if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
			       ICACHE_PRIMED))
		break;
	udelay(1);
}
3043 if (i >= usec_timeout) {
3044 dev_err(adev->dev, "failed to prime instruction cache\n");
3048 mutex_lock(&adev->srbm_mutex);
3049 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
3050 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
3051 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
3052 (me_hdr->ucode_start_addr_hi << 30) |
	     (me_hdr->ucode_start_addr_lo >> 2));
WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
	     me_hdr->ucode_start_addr_hi >> 2);
/*
 * Program CP_ME_CNTL to reset the given pipe so that
 * CP_ME_PRGRM_CNTR_START takes effect.
 */
tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
if (pipe_id == 0)
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
			    ME_PIPE0_RESET, 1);
else
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
			    ME_PIPE1_RESET, 1);
WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

/* Clear the me pipe reset bit again. */
if (pipe_id == 0)
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
			    ME_PIPE0_RESET, 0);
else
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
			    ME_PIPE1_RESET, 0);
WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3079 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
3080 lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3081 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
3082 upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3084 soc21_grbm_select(adev, 0, 0, 0, 0);
3085 mutex_unlock(&adev->srbm_mutex);
3087 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
3088 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
3089 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
3090 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
3092 /* Invalidate the data caches */
3093 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3094 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3095 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
3097 for (i = 0; i < usec_timeout; i++) {
3098 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3099 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
3100 INVALIDATE_DCACHE_COMPLETE))
3105 if (i >= usec_timeout) {
3106 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
3113 static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3117 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw)
3120 gfx_v11_0_cp_gfx_enable(adev, false);
3122 if (adev->gfx.rs64_enable)
3123 r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev);
3125 r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev);
3127 dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
3131 if (adev->gfx.rs64_enable)
3132 r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev);
3134 r = gfx_v11_0_cp_gfx_load_me_microcode(adev);
3136 dev_err(adev->dev, "(%d) failed to load me fw\n", r);
3143 static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev)
3145 struct amdgpu_ring *ring;
3146 const struct cs_section_def *sect = NULL;
3147 const struct cs_extent_def *ext = NULL;
3152 WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT,
3153 adev->gfx.config.max_hw_contexts - 1);
3154 WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1);
3156 if (!amdgpu_async_gfx_ring)
3157 gfx_v11_0_cp_gfx_enable(adev, true);
3159 ring = &adev->gfx.gfx_ring[0];
3160 r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev));
3162 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3166 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3167 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3169 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3170 amdgpu_ring_write(ring, 0x80000000);
3171 amdgpu_ring_write(ring, 0x80000000);
3173 for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
3174 for (ext = sect->section; ext->extent != NULL; ++ext) {
3175 if (sect->id == SECT_CONTEXT) {
amdgpu_ring_write(ring,
		  PACKET3(PACKET3_SET_CONTEXT_REG,
			  ext->reg_count));
amdgpu_ring_write(ring, ext->reg_index -
		  PACKET3_SET_CONTEXT_REG_START);
3181 for (i = 0; i < ext->reg_count; i++)
3182 amdgpu_ring_write(ring, ext->extent[i]);
ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) -
		 PACKET3_SET_CONTEXT_REG_START;
3189 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
3190 amdgpu_ring_write(ring, ctx_reg_offset);
3191 amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);
3193 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3194 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3196 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3197 amdgpu_ring_write(ring, 0);
3199 amdgpu_ring_commit(ring);
3201 /* submit cs packet to copy state 0 to next available state */
3202 if (adev->gfx.num_gfx_rings > 1) {
3203 /* maximum supported gfx ring is 2 */
3204 ring = &adev->gfx.gfx_ring[1];
3205 r = amdgpu_ring_alloc(ring, 2);
3207 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3211 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3212 amdgpu_ring_write(ring, 0);
3214 amdgpu_ring_commit(ring);
3219 static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
3224 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
3225 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);
3227 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
3230 static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
3231 struct amdgpu_ring *ring)
3235 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
if (ring->use_doorbell) {
	tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
			    DOORBELL_OFFSET, ring->doorbell_index);
	tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
			    DOORBELL_EN, 1);
} else {
	tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
			    DOORBELL_EN, 0);
}
WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp);
3247 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3248 DOORBELL_RANGE_LOWER, ring->doorbell_index);
3249 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp);
3251 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
3252 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
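/* Program the ring-buffer registers (size, rptr/wptr report
 * addresses, base and doorbell) for each hardware gfx ring, one CP
 * pipe per ring. */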
3255 static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
3257 struct amdgpu_ring *ring;
3260 u64 rb_addr, rptr_addr, wptr_gpu_addr;
3262 /* Set the write pointer delay */
3263 WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);
3265 /* set the RB to use vmid 0 */
3266 WREG32_SOC15(GC, 0, regCP_RB_VMID, 0);
3268 /* Init gfx ring 0 for pipe 0 */
3269 mutex_lock(&adev->srbm_mutex);
3270 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3272 /* Set ring buffer size */
3273 ring = &adev->gfx.gfx_ring[0];
3274 rb_bufsz = order_base_2(ring->ring_size / 8);
3275 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3276 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3277 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3279 /* Initialize the ring buffer's write pointers */
3281 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
3282 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
/* set the wb address whether it's enabled or not */
3285 rptr_addr = ring->rptr_gpu_addr;
3286 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3287 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3288 CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3290 wptr_gpu_addr = ring->wptr_gpu_addr;
3291 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3292 lower_32_bits(wptr_gpu_addr));
3293 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3294 upper_32_bits(wptr_gpu_addr));
3297 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3299 rb_addr = ring->gpu_addr >> 8;
3300 WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr);
3301 WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3303 WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1);
3305 gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3306 mutex_unlock(&adev->srbm_mutex);
3308 /* Init gfx ring 1 for pipe 1 */
3309 if (adev->gfx.num_gfx_rings > 1) {
3310 mutex_lock(&adev->srbm_mutex);
3311 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
3312 /* maximum supported gfx ring is 2 */
3313 ring = &adev->gfx.gfx_ring[1];
3314 rb_bufsz = order_base_2(ring->ring_size / 8);
3315 tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
3316 tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
3317 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3318 /* Initialize the ring buffer's write pointers */
3320 WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr));
3321 WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
/* Set the wb address whether it's enabled or not */
3323 rptr_addr = ring->rptr_gpu_addr;
3324 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
3325 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3326 CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3327 wptr_gpu_addr = ring->wptr_gpu_addr;
3328 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3329 lower_32_bits(wptr_gpu_addr));
3330 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3331 upper_32_bits(wptr_gpu_addr));
3334 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3336 rb_addr = ring->gpu_addr >> 8;
3337 WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr);
3338 WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr));
3339 WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1);
3341 gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3342 mutex_unlock(&adev->srbm_mutex);
3344 /* Switch to pipe 0 */
3345 mutex_lock(&adev->srbm_mutex);
3346 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3347 mutex_unlock(&adev->srbm_mutex);
3349 /* start the ring */
3350 gfx_v11_0_cp_gfx_start(adev);
3355 static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3359 if (adev->gfx.rs64_enable) {
3360 data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
		     enable ? 0 : 1);
data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
		     enable ? 0 : 1);
data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
		     enable ? 0 : 1);
data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
		     enable ? 0 : 1);
data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
		     enable ? 0 : 1);
data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
		     enable ? 1 : 0);
data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
		     enable ? 1 : 0);
data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
		     enable ? 1 : 0);
data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
		     enable ? 1 : 0);
data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
		     enable ? 0 : 1);
WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data);
3383 data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL);
if (enable) {
	data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0);
	if (!adev->enable_mes_kiq)
		data = REG_SET_FIELD(data, CP_MEC_CNTL,
				     MEC_ME2_HALT, 0);
} else {
	data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1);
}
WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data);
3400 static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3402 const struct gfx_firmware_header_v1_0 *mec_hdr;
3403 const __le32 *fw_data;
3404 unsigned i, fw_size;
3408 if (!adev->gfx.mec_fw)
3411 gfx_v11_0_cp_compute_enable(adev, false);
3413 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3414 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3416 fw_data = (const __le32 *)
3417 (adev->gfx.mec_fw->data +
3418 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3419 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
3421 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
3422 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3423 &adev->gfx.mec.mec_fw_obj,
3424 &adev->gfx.mec.mec_fw_gpu_addr,
3427 dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
3428 gfx_v11_0_mec_fini(adev);
3432 memcpy(fw, fw_data, fw_size);
3434 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3435 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3437 gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr);
3440 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0);
3442 for (i = 0; i < mec_hdr->jt_size; i++)
3443 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA,
3444 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3446 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
3451 static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
3453 const struct gfx_firmware_header_v2_0 *mec_hdr;
3454 const __le32 *fw_ucode, *fw_data;
3455 u32 tmp, fw_ucode_size, fw_data_size;
3456 u32 i, usec_timeout = 50000; /* Wait for 50 ms */
3457 u32 *fw_ucode_ptr, *fw_data_ptr;
3460 if (!adev->gfx.mec_fw)
3463 gfx_v11_0_cp_compute_enable(adev, false);
3465 mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
3466 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3468 fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
3469 le32_to_cpu(mec_hdr->ucode_offset_bytes));
3470 fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);
3472 fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
3473 le32_to_cpu(mec_hdr->data_offset_bytes));
3474 fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
3476 r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3478 AMDGPU_GEM_DOMAIN_VRAM |
3479 AMDGPU_GEM_DOMAIN_GTT,
3480 &adev->gfx.mec.mec_fw_obj,
3481 &adev->gfx.mec.mec_fw_gpu_addr,
3482 (void **)&fw_ucode_ptr);
3484 dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
3485 gfx_v11_0_mec_fini(adev);
3489 r = amdgpu_bo_create_reserved(adev, fw_data_size,
3491 AMDGPU_GEM_DOMAIN_VRAM |
3492 AMDGPU_GEM_DOMAIN_GTT,
3493 &adev->gfx.mec.mec_fw_data_obj,
3494 &adev->gfx.mec.mec_fw_data_gpu_addr,
3495 (void **)&fw_data_ptr);
3497 dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
3498 gfx_v11_0_mec_fini(adev);
3502 memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
3503 memcpy(fw_data_ptr, fw_data, fw_data_size);
3505 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3506 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
3507 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3508 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);
3510 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
3511 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3512 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
3513 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3514 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
3516 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
3517 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
3518 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
3519 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
3521 mutex_lock(&adev->srbm_mutex);
3522 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3523 soc21_grbm_select(adev, 1, i, 0, 0);
3525 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, adev->gfx.mec.mec_fw_data_gpu_addr);
3526 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
3527 upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));
3529 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
3530 mec_hdr->ucode_start_addr_lo >> 2 |
3531 mec_hdr->ucode_start_addr_hi << 30);
3532 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
3533 mec_hdr->ucode_start_addr_hi >> 2);
3535 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr);
3536 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
3537 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3539 mutex_unlock(&adev->srbm_mutex);
3540 soc21_grbm_select(adev, 0, 0, 0, 0);
/* Trigger an invalidation of the MEC data cache */
3543 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
3544 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3545 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
3547 /* Wait for invalidation complete */
3548 for (i = 0; i < usec_timeout; i++) {
3549 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
3550 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
3551 INVALIDATE_DCACHE_COMPLETE))
3556 if (i >= usec_timeout) {
3557 dev_err(adev->dev, "failed to invalidate instruction cache\n");
3561 /* Trigger an invalidation of the L1 instruction caches */
3562 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
3563 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
3564 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
3566 /* Wait for invalidation complete */
3567 for (i = 0; i < usec_timeout; i++) {
3568 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
3569 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
3570 INVALIDATE_CACHE_COMPLETE))
3575 if (i >= usec_timeout) {
3576 dev_err(adev->dev, "failed to invalidate instruction cache\n");
3583 static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring)
3586 struct amdgpu_device *adev = ring->adev;
3588 /* tell RLC which is KIQ queue */
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
/* low byte encodes the KIQ: queue in bits [2:0], pipe in [4:3], me in [5] */
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
tmp |= 0x80;
WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
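/* Doorbell indices are in 64-bit units here, so (index * 2) << 2
 * yields the byte offset the range registers take. */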
3597 static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev)
3599 /* set graphics engine doorbell range */
3600 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER,
3601 (adev->doorbell_index.gfx_ring0 * 2) << 2);
3602 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
3603 (adev->doorbell_index.gfx_userqueue_end * 2) << 2);
3605 /* set compute engine doorbell range */
3606 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
3607 (adev->doorbell_index.kiq * 2) << 2);
3608 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
3609 (adev->doorbell_index.userqueue_end * 2) << 2);
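/* Fill the gfx MQD (memory queue descriptor): a memory image of the
 * CP_GFX_HQD_* registers that the CP/MES reads when it maps the
 * queue. */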
3612 static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
3613 struct amdgpu_mqd_prop *prop)
3615 struct v11_gfx_mqd *mqd = m;
3616 uint64_t hqd_gpu_addr, wb_gpu_addr;
3620 /* set up gfx hqd wptr */
3621 mqd->cp_gfx_hqd_wptr = 0;
3622 mqd->cp_gfx_hqd_wptr_hi = 0;
3624 /* set the pointer to the MQD */
3625 mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc;
3626 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
3628 /* set up mqd control */
3629 tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
3630 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
3631 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
3632 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
3633 mqd->cp_gfx_mqd_control = tmp;
/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
3636 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
3637 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
3638 mqd->cp_gfx_hqd_vmid = 0;
3640 /* set up default queue priority level
3641 * 0x0 = low priority, 0x1 = high priority */
3642 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
3643 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
3644 mqd->cp_gfx_hqd_queue_priority = tmp;
3646 /* set up time quantum */
3647 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
3648 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
3649 mqd->cp_gfx_hqd_quantum = tmp;
/* set up gfx hqd base. this is similar to CP_RB_BASE */
3652 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
3653 mqd->cp_gfx_hqd_base = hqd_gpu_addr;
3654 mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);
/* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */
3657 wb_gpu_addr = prop->rptr_gpu_addr;
3658 mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
3659 mqd->cp_gfx_hqd_rptr_addr_hi =
3660 upper_32_bits(wb_gpu_addr) & 0xffff;
3662 /* set up rb_wptr_poll addr */
3663 wb_gpu_addr = prop->wptr_gpu_addr;
3664 mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3665 mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
/* set up the gfx_hqd_control, similar to CP_RB0_CNTL */
3668 rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
3669 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
3670 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
3671 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
3672 #ifdef __BIG_ENDIAN
3673 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
3674 #endif
3675 mqd->cp_gfx_hqd_cntl = tmp;
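/* RB_BUFSZ here is log2 of the ring size in dwords minus one, so the
 * hardware ring size works out to 2^(RB_BUFSZ + 1) dwords. Worked example:
 * a 4 KiB queue gives queue_size / 4 = 1024 dwords,
 * order_base_2(1024) - 1 = 9, and 2^(9 + 1) = 1024 dwords = 4 KiB again.
 */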
3677 /* set up cp_doorbell_control */
3678 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
3679 if (prop->use_doorbell) {
3680 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3681 DOORBELL_OFFSET, prop->doorbell_index);
3682 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3683 DOORBELL_EN, 1);
3684 } else
3685 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3686 DOORBELL_EN, 0);
3687 mqd->cp_rb_doorbell_control = tmp;
3689 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3690 mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);
3692 /* activate the queue */
3693 mqd->cp_gfx_hqd_active = 1;
3698 static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
3700 struct amdgpu_device *adev = ring->adev;
3701 struct v11_gfx_mqd *mqd = ring->mqd_ptr;
3702 int mqd_idx = ring - &adev->gfx.gfx_ring[0];
3704 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
3705 memset((void *)mqd, 0, sizeof(*mqd));
3706 mutex_lock(&adev->srbm_mutex);
3707 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3708 amdgpu_ring_init_mqd(ring);
3709 soc21_grbm_select(adev, 0, 0, 0, 0);
3710 mutex_unlock(&adev->srbm_mutex);
3711 if (adev->gfx.me.mqd_backup[mqd_idx])
3712 memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3714 /* restore mqd with the backup copy */
3715 if (adev->gfx.me.mqd_backup[mqd_idx])
3716 memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
3717 /* reset the ring */
3718 ring->wptr = 0;
3719 *ring->wptr_cpu_addr = 0;
3720 amdgpu_ring_clear_ring(ring);
3726 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
3729 struct amdgpu_ring *ring;
3731 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3732 ring = &adev->gfx.gfx_ring[i];
3734 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3735 if (unlikely(r != 0))
3738 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3740 r = gfx_v11_0_gfx_init_queue(ring);
3741 amdgpu_bo_kunmap(ring->mqd_obj);
3742 ring->mqd_ptr = NULL;
3744 amdgpu_bo_unreserve(ring->mqd_obj);
3749 r = amdgpu_gfx_enable_kgq(adev, 0);
3753 return gfx_v11_0_cp_gfx_start(adev);
3756 static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
3757 struct amdgpu_mqd_prop *prop)
3759 struct v11_compute_mqd *mqd = m;
3760 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3763 mqd->header = 0xC0310800;
3764 mqd->compute_pipelinestat_enable = 0x00000001;
3765 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3766 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3767 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3768 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3769 mqd->compute_misc_reserved = 0x00000007;
3771 eop_base_addr = prop->eop_gpu_addr >> 8;
3772 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3773 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3775 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3776 tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
3777 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3778 (order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));
3780 mqd->cp_hqd_eop_control = tmp;
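/* Worked example of the EOP_SIZE encoding above: GFX11_MEC_HPD_SIZE is
 * 2048 bytes = 512 dwords, order_base_2(512) - 1 = 8, and the hardware
 * then decodes 2^(8 + 1) = 512 dwords, matching the allocation.
 */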
3782 /* enable doorbell? */
3783 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
3785 if (prop->use_doorbell) {
3786 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3787 DOORBELL_OFFSET, prop->doorbell_index);
3788 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3789 DOORBELL_EN, 1);
3790 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3791 DOORBELL_SOURCE, 0);
3792 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3793 DOORBELL_HIT, 0);
3794 } else {
3795 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3796 DOORBELL_EN, 0);
3797 }
3799 mqd->cp_hqd_pq_doorbell_control = tmp;
3801 /* disable the queue if it's active */
3802 mqd->cp_hqd_dequeue_request = 0;
3803 mqd->cp_hqd_pq_rptr = 0;
3804 mqd->cp_hqd_pq_wptr_lo = 0;
3805 mqd->cp_hqd_pq_wptr_hi = 0;
3807 /* set the pointer to the MQD */
3808 mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
3809 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
3811 /* set MQD vmid to 0 */
3812 tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
3813 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3814 mqd->cp_mqd_control = tmp;
3816 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3817 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
3818 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3819 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3821 /* set up the HQD, this is similar to CP_RB0_CNTL */
3822 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
3823 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3824 (order_base_2(prop->queue_size / 4) - 1));
3825 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3826 (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
3827 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
3828 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
3829 prop->allow_tunneling);
3830 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3831 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3832 mqd->cp_hqd_pq_control = tmp;
3834 /* set the wb address whether it's enabled or not */
3835 wb_gpu_addr = prop->rptr_gpu_addr;
3836 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3837 mqd->cp_hqd_pq_rptr_report_addr_hi =
3838 upper_32_bits(wb_gpu_addr) & 0xffff;
3840 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3841 wb_gpu_addr = prop->wptr_gpu_addr;
3842 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3843 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3846 /* enable the doorbell if requested */
3847 if (prop->use_doorbell) {
3848 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
3849 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3850 DOORBELL_OFFSET, prop->doorbell_index);
3852 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3853 DOORBELL_EN, 1);
3854 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3855 DOORBELL_SOURCE, 0);
3856 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3857 DOORBELL_HIT, 0);
3858 }
3860 mqd->cp_hqd_pq_doorbell_control = tmp;
3862 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3863 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
3865 /* set the vmid for the queue */
3866 mqd->cp_hqd_vmid = 0;
3868 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
3869 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
3870 mqd->cp_hqd_persistent_state = tmp;
3872 /* set MIN_IB_AVAIL_SIZE */
3873 tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
3874 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3875 mqd->cp_hqd_ib_control = tmp;
3877 /* set static priority for a compute queue/ring */
3878 mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
3879 mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
3881 mqd->cp_hqd_active = prop->hqd_active;
3886 static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring)
3888 struct amdgpu_device *adev = ring->adev;
3889 struct v11_compute_mqd *mqd = ring->mqd_ptr;
3892 /* deactivate the queue */
3893 if (amdgpu_sriov_vf(adev))
3894 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0);
3896 /* disable wptr polling */
3897 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3899 /* write the EOP addr */
3900 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR,
3901 mqd->cp_hqd_eop_base_addr_lo);
3902 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI,
3903 mqd->cp_hqd_eop_base_addr_hi);
3905 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3906 WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL,
3907 mqd->cp_hqd_eop_control);
3909 /* enable doorbell? */
3910 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
3911 mqd->cp_hqd_pq_doorbell_control);
3913 /* disable the queue if it's active */
3914 if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
3915 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
3916 for (j = 0; j < adev->usec_timeout; j++) {
3917 if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
3921 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST,
3922 mqd->cp_hqd_dequeue_request);
3923 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR,
3924 mqd->cp_hqd_pq_rptr);
3925 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
3926 mqd->cp_hqd_pq_wptr_lo);
3927 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
3928 mqd->cp_hqd_pq_wptr_hi);
3931 /* set the pointer to the MQD */
3932 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR,
3933 mqd->cp_mqd_base_addr_lo);
3934 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI,
3935 mqd->cp_mqd_base_addr_hi);
3937 /* set MQD vmid to 0 */
3938 WREG32_SOC15(GC, 0, regCP_MQD_CONTROL,
3939 mqd->cp_mqd_control);
3941 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3942 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE,
3943 mqd->cp_hqd_pq_base_lo);
3944 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI,
3945 mqd->cp_hqd_pq_base_hi);
3947 /* set up the HQD, this is similar to CP_RB0_CNTL */
3948 WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL,
3949 mqd->cp_hqd_pq_control);
3951 /* set the wb address whether it's enabled or not */
3952 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
3953 mqd->cp_hqd_pq_rptr_report_addr_lo);
3954 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3955 mqd->cp_hqd_pq_rptr_report_addr_hi);
3957 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3958 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
3959 mqd->cp_hqd_pq_wptr_poll_addr_lo);
3960 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3961 mqd->cp_hqd_pq_wptr_poll_addr_hi);
3963 /* enable the doorbell if requested */
3964 if (ring->use_doorbell) {
3965 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
3966 (adev->doorbell_index.kiq * 2) << 2);
3967 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
3968 (adev->doorbell_index.userqueue_end * 2) << 2);
3971 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
3972 mqd->cp_hqd_pq_doorbell_control);
3974 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3975 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
3976 mqd->cp_hqd_pq_wptr_lo);
3977 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
3978 mqd->cp_hqd_pq_wptr_hi);
3980 /* set the vmid for the queue */
3981 WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid);
3983 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE,
3984 mqd->cp_hqd_persistent_state);
3986 /* activate the queue */
3987 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE,
3988 mqd->cp_hqd_active);
3990 if (ring->use_doorbell)
3991 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
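/* Note the ordering above: the MQD-derived registers are restored first,
 * the queue is made active, and only then is the CP_PQ_STATUS doorbell
 * gate opened, presumably so that no doorbell can ring a half-programmed
 * HQD.
 */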
3996 static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
3998 struct amdgpu_device *adev = ring->adev;
3999 struct v11_compute_mqd *mqd = ring->mqd_ptr;
4001 gfx_v11_0_kiq_setting(ring);
4003 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
4004 /* reset MQD to a clean status */
4005 if (adev->gfx.kiq[0].mqd_backup)
4006 memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
4008 /* reset ring buffer */
4009 ring->wptr = 0;
4010 amdgpu_ring_clear_ring(ring);
4012 mutex_lock(&adev->srbm_mutex);
4013 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4014 gfx_v11_0_kiq_init_register(ring);
4015 soc21_grbm_select(adev, 0, 0, 0, 0);
4016 mutex_unlock(&adev->srbm_mutex);
4018 memset((void *)mqd, 0, sizeof(*mqd));
4019 if (amdgpu_sriov_vf(adev) && adev->in_suspend)
4020 amdgpu_ring_clear_ring(ring);
4021 mutex_lock(&adev->srbm_mutex);
4022 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4023 amdgpu_ring_init_mqd(ring);
4024 gfx_v11_0_kiq_init_register(ring);
4025 soc21_grbm_select(adev, 0, 0, 0, 0);
4026 mutex_unlock(&adev->srbm_mutex);
4028 if (adev->gfx.kiq[0].mqd_backup)
4029 memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
4035 static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
4037 struct amdgpu_device *adev = ring->adev;
4038 struct v11_compute_mqd *mqd = ring->mqd_ptr;
4039 int mqd_idx = ring - &adev->gfx.compute_ring[0];
4041 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4042 memset((void *)mqd, 0, sizeof(*mqd));
4043 mutex_lock(&adev->srbm_mutex);
4044 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4045 amdgpu_ring_init_mqd(ring);
4046 soc21_grbm_select(adev, 0, 0, 0, 0);
4047 mutex_unlock(&adev->srbm_mutex);
4049 if (adev->gfx.mec.mqd_backup[mqd_idx])
4050 memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
4052 /* restore MQD to a clean status */
4053 if (adev->gfx.mec.mqd_backup[mqd_idx])
4054 memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
4055 /* reset ring buffer */
4056 ring->wptr = 0;
4057 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
4058 amdgpu_ring_clear_ring(ring);
4064 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
4066 struct amdgpu_ring *ring;
4069 ring = &adev->gfx.kiq[0].ring;
4071 r = amdgpu_bo_reserve(ring->mqd_obj, false);
4072 if (unlikely(r != 0))
4075 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
4076 if (unlikely(r != 0)) {
4077 amdgpu_bo_unreserve(ring->mqd_obj);
4081 gfx_v11_0_kiq_init_queue(ring);
4082 amdgpu_bo_kunmap(ring->mqd_obj);
4083 ring->mqd_ptr = NULL;
4084 amdgpu_bo_unreserve(ring->mqd_obj);
4085 ring->sched.ready = true;
4089 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
4091 struct amdgpu_ring *ring = NULL;
4094 if (!amdgpu_async_gfx_ring)
4095 gfx_v11_0_cp_compute_enable(adev, true);
4097 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4098 ring = &adev->gfx.compute_ring[i];
4100 r = amdgpu_bo_reserve(ring->mqd_obj, false);
4101 if (unlikely(r != 0))
4103 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
4105 r = gfx_v11_0_kcq_init_queue(ring);
4106 amdgpu_bo_kunmap(ring->mqd_obj);
4107 ring->mqd_ptr = NULL;
4109 amdgpu_bo_unreserve(ring->mqd_obj);
4114 r = amdgpu_gfx_enable_kcq(adev, 0);
4119 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
4122 struct amdgpu_ring *ring;
4124 if (!(adev->flags & AMD_IS_APU))
4125 gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4127 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4128 /* legacy firmware loading */
4129 r = gfx_v11_0_cp_gfx_load_microcode(adev);
4133 if (adev->gfx.rs64_enable)
4134 r = gfx_v11_0_cp_compute_load_microcode_rs64(adev);
4136 r = gfx_v11_0_cp_compute_load_microcode(adev);
4141 gfx_v11_0_cp_set_doorbell_range(adev);
4143 if (amdgpu_async_gfx_ring) {
4144 gfx_v11_0_cp_compute_enable(adev, true);
4145 gfx_v11_0_cp_gfx_enable(adev, true);
4148 if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
4149 r = amdgpu_mes_kiq_hw_init(adev);
4151 r = gfx_v11_0_kiq_resume(adev);
4155 r = gfx_v11_0_kcq_resume(adev);
4159 if (!amdgpu_async_gfx_ring) {
4160 r = gfx_v11_0_cp_gfx_resume(adev);
4164 r = gfx_v11_0_cp_async_gfx_ring_resume(adev);
4169 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4170 ring = &adev->gfx.gfx_ring[i];
4171 r = amdgpu_ring_test_helper(ring);
4176 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4177 ring = &adev->gfx.compute_ring[i];
4178 r = amdgpu_ring_test_helper(ring);
4186 static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable)
4188 gfx_v11_0_cp_gfx_enable(adev, enable);
4189 gfx_v11_0_cp_compute_enable(adev, enable);
4192 static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
4197 r = adev->gfxhub.funcs->gart_enable(adev);
4201 adev->hdp.funcs->flush_hdp(adev, NULL);
4203 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
4206 adev->gfxhub.funcs->set_fault_enable_default(adev, value);
4207 amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
4212 static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev)
4217 if (adev->gfx.rs64_enable) {
4218 tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL);
4219 tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1);
4220 WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp);
4222 tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL);
4223 tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1);
4224 WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp);
4227 if (amdgpu_emu_mode == 1)
4231 static int get_gb_addr_config(struct amdgpu_device *adev)
4235 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
4236 if (gb_addr_config == 0)
4239 adev->gfx.config.gb_addr_config_fields.num_pkrs =
4240 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
4242 adev->gfx.config.gb_addr_config = gb_addr_config;
4244 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
4245 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4246 GB_ADDR_CONFIG, NUM_PIPES);
4248 adev->gfx.config.max_tile_pipes =
4249 adev->gfx.config.gb_addr_config_fields.num_pipes;
4251 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
4252 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4253 GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
4254 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
4255 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4256 GB_ADDR_CONFIG, NUM_RB_PER_SE);
4257 adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
4258 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4259 GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
4260 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
4261 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4262 GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
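/* The 1 << (8 + field) decode means a PIPE_INTERLEAVE_SIZE field of 0
 * corresponds to a 256-byte interleave, 1 to 512 bytes, and so on.
 */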
4267 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev)
4271 data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG);
4272 data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
4273 WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data);
4275 data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG);
4276 data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
4277 WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
4280 static int gfx_v11_0_hw_init(void *handle)
4283 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4285 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
4286 if (adev->gfx.imu.funcs) {
4287 /* RLC autoload sequence 1: Program rlc ram */
4288 if (adev->gfx.imu.funcs->program_rlc_ram)
4289 adev->gfx.imu.funcs->program_rlc_ram(adev);
4291 /* rlc autoload firmware */
4292 r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
4296 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4297 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
4298 if (adev->gfx.imu.funcs->load_microcode)
4299 adev->gfx.imu.funcs->load_microcode(adev);
4300 if (adev->gfx.imu.funcs->setup_imu)
4301 adev->gfx.imu.funcs->setup_imu(adev);
4302 if (adev->gfx.imu.funcs->start_imu)
4303 adev->gfx.imu.funcs->start_imu(adev);
4306 /* disable gpa mode in backdoor loading */
4307 gfx_v11_0_disable_gpa_mode(adev);
4311 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
4312 (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
4313 r = gfx_v11_0_wait_for_rlc_autoload_complete(adev);
4315 dev_err(adev->dev, "(%d) failed to wait for RLC autoload to complete\n", r);
4320 adev->gfx.is_poweron = true;
4322 if (get_gb_addr_config(adev))
4323 DRM_WARN("Invalid gb_addr_config!\n");
4325 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
4326 adev->gfx.rs64_enable)
4327 gfx_v11_0_config_gfx_rs64(adev);
4329 r = gfx_v11_0_gfxhub_enable(adev);
4333 if (!amdgpu_emu_mode)
4334 gfx_v11_0_init_golden_registers(adev);
4336 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
4337 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
4339 * For GFX 11, RLC firmware loading depends on the SMU firmware being
4340 * loaded first, so in direct loading mode the SMC ucode must be loaded
4343 if (!(adev->flags & AMD_IS_APU)) {
4344 r = amdgpu_pm_load_smu_firmware(adev, NULL);
4350 gfx_v11_0_constants_init(adev);
4352 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
4353 gfx_v11_0_select_cp_fw_arch(adev);
4355 if (adev->nbio.funcs->gc_doorbell_init)
4356 adev->nbio.funcs->gc_doorbell_init(adev);
4358 r = gfx_v11_0_rlc_resume(adev);
4363 * golden register init and rlc resume may override some registers,
4364 * so reconfigure them here
4366 gfx_v11_0_tcp_harvest(adev);
4368 r = gfx_v11_0_cp_resume(adev);
4372 /* get IMU version from HW if it's not set */
4373 if (!adev->gfx.imu_fw_version)
4374 adev->gfx.imu_fw_version = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_0);
4379 static int gfx_v11_0_hw_fini(void *handle)
4381 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4383 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4384 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4386 if (!adev->no_hw_access) {
4387 if (amdgpu_async_gfx_ring) {
4388 if (amdgpu_gfx_disable_kgq(adev, 0))
4389 DRM_ERROR("KGQ disable failed\n");
4392 if (amdgpu_gfx_disable_kcq(adev, 0))
4393 DRM_ERROR("KCQ disable failed\n");
4395 amdgpu_mes_kiq_hw_fini(adev);
4398 if (amdgpu_sriov_vf(adev))
4399 /* Skip the steps that disable CPG and clear the KIQ position,
4400 * so that the CP can perform IDLE-SAVE during the switch. Those
4401 * steps are needed to avoid a DMAR error on gfx9, which has not
4402 * been reproduced on gfx11.
4406 gfx_v11_0_cp_enable(adev, false);
4407 gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4409 adev->gfxhub.funcs->gart_disable(adev);
4411 adev->gfx.is_poweron = false;
4416 static int gfx_v11_0_suspend(void *handle)
4418 return gfx_v11_0_hw_fini(handle);
4421 static int gfx_v11_0_resume(void *handle)
4423 return gfx_v11_0_hw_init(handle);
4426 static bool gfx_v11_0_is_idle(void *handle)
4428 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4430 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
4431 GRBM_STATUS, GUI_ACTIVE))
4437 static int gfx_v11_0_wait_for_idle(void *handle)
4441 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4443 for (i = 0; i < adev->usec_timeout; i++) {
4444 /* read GRBM_STATUS */
4445 tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) &
4446 GRBM_STATUS__GUI_ACTIVE_MASK;
4448 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
4455 static int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
4460 for (i = 0; i < adev->usec_timeout; i++) {
4461 /* Request with MeId=2, PipeId=0 */
4462 tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req);
4463 tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4);
4464 WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp);
4466 val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX);
4471 tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX,
4474 /* unlocked or locked by firmware */
4481 if (i >= adev->usec_timeout)
4487 static int gfx_v11_0_soft_reset(void *handle)
4489 u32 grbm_soft_reset = 0;
4492 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4494 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4495 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
4496 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
4497 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0);
4498 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
4499 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
4501 gfx_v11_0_set_safe_mode(adev, 0);
4503 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
4504 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
4505 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
4506 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
4507 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
4508 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
4509 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
4510 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
4512 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
4513 WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
4517 for (i = 0; i < adev->gfx.me.num_me; ++i) {
4518 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
4519 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
4520 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
4521 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
4522 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
4523 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
4524 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
4526 WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
4531 /* Try to acquire the gfx mutex before access to CP_VMID_RESET */
4532 r = gfx_v11_0_request_gfx_index_mutex(adev, 1);
4534 DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n");
4538 WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);
4540 /* Read the CP_VMID_RESET register three times to give
4541 * GFX_HQD_ACTIVE sufficient time to reach 0. */
4542 RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4543 RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4544 RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4546 /* release the gfx mutex */
4547 r = gfx_v11_0_request_gfx_index_mutex(adev, 0);
4549 DRM_ERROR("Failed to release the gfx mutex during soft reset\n");
4553 for (i = 0; i < adev->usec_timeout; i++) {
4554 if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
4555 !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
4559 if (i >= adev->usec_timeout) {
4560 printk("Failed to wait all pipes clean\n");
4564 /********** trigger soft reset ***********/
4565 grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
4566 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4568 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4570 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4572 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4574 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4576 WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
4577 /********** exit soft reset ***********/
4578 grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
4579 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4581 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4583 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4585 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4587 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4589 WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
4591 tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL);
4592 tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1);
4593 WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp);
4595 WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0);
4596 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0);
4598 for (i = 0; i < adev->usec_timeout; i++) {
4599 if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET))
4603 if (i >= adev->usec_timeout) {
4604 printk("Failed to wait CP_VMID_RESET to 0\n");
4608 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4609 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
4610 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
4611 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
4612 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
4613 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
4615 gfx_v11_0_unset_safe_mode(adev, 0);
4617 return gfx_v11_0_cp_resume(adev);
4620 static bool gfx_v11_0_check_soft_reset(void *handle)
4623 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4624 struct amdgpu_ring *ring;
4625 long tmo = msecs_to_jiffies(1000);
4627 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4628 ring = &adev->gfx.gfx_ring[i];
4629 r = amdgpu_ring_test_ib(ring, tmo);
4634 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4635 ring = &adev->gfx.compute_ring[i];
4636 r = amdgpu_ring_test_ib(ring, tmo);
4644 static int gfx_v11_0_post_soft_reset(void *handle)
4647 * GFX soft reset will impact MES; MES needs to be resumed after a GFX soft reset
4649 return amdgpu_mes_resume((struct amdgpu_device *)handle);
4652 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4655 uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
4657 if (amdgpu_sriov_vf(adev)) {
4658 amdgpu_gfx_off_ctrl(adev, false);
4659 mutex_lock(&adev->gfx.gpu_clock_mutex);
4660 clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
4661 clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
4662 clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
4663 if (clock_counter_hi_pre != clock_counter_hi_after)
4664 clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
4665 mutex_unlock(&adev->gfx.gpu_clock_mutex);
4666 amdgpu_gfx_off_ctrl(adev, true);
4669 clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
4670 clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
4671 clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
4672 if (clock_counter_hi_pre != clock_counter_hi_after)
4673 clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
4676 clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
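/* The hi/lo/hi read sequence above is the usual rollover-safe way to
 * sample a 64-bit counter through two 32-bit registers: if the high half
 * changed between the two reads, the low half wrapped in between, so it
 * is read again and paired with the later high value.
 */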
4681 static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4683 uint32_t gds_base, uint32_t gds_size,
4684 uint32_t gws_base, uint32_t gws_size,
4685 uint32_t oa_base, uint32_t oa_size)
4687 struct amdgpu_device *adev = ring->adev;
4690 gfx_v11_0_write_data_to_reg(ring, 0, false,
4691 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid,
4695 gfx_v11_0_write_data_to_reg(ring, 0, false,
4696 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid,
4700 gfx_v11_0_write_data_to_reg(ring, 0, false,
4701 SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid,
4702 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4705 gfx_v11_0_write_data_to_reg(ring, 0, false,
4706 SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid,
4707 (1 << (oa_size + oa_base)) - (1 << oa_base));
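/* (1 << (oa_size + oa_base)) - (1 << oa_base) builds a mask of oa_size
 * consecutive bits starting at bit oa_base; e.g. base 4, size 4 gives
 * 0x100 - 0x10 = 0xf0.
 */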
4710 static int gfx_v11_0_early_init(void *handle)
4712 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4714 adev->gfx.funcs = &gfx_v11_0_gfx_funcs;
4716 adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
4717 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4718 AMDGPU_MAX_COMPUTE_RINGS);
4720 gfx_v11_0_set_kiq_pm4_funcs(adev);
4721 gfx_v11_0_set_ring_funcs(adev);
4722 gfx_v11_0_set_irq_funcs(adev);
4723 gfx_v11_0_set_gds_init(adev);
4724 gfx_v11_0_set_rlc_funcs(adev);
4725 gfx_v11_0_set_mqd_funcs(adev);
4726 gfx_v11_0_set_imu_funcs(adev);
4728 gfx_v11_0_init_rlcg_reg_access_ctrl(adev);
4730 return gfx_v11_0_init_microcode(adev);
4733 static int gfx_v11_0_late_init(void *handle)
4735 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4738 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4742 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4749 static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev)
4753 /* if RLC is not enabled, do nothing */
4754 rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL);
4755 return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
4758 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
4763 data = RLC_SAFE_MODE__CMD_MASK;
4764 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4766 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);
4768 /* wait for RLC_SAFE_MODE */
4769 for (i = 0; i < adev->usec_timeout; i++) {
4770 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE),
4771 RLC_SAFE_MODE, CMD))
4777 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
4779 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
4782 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
4787 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
4790 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4793 data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
4795 data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
4798 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4801 static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev,
4806 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
4809 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4812 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
4814 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
4817 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4820 static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev,
4825 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
4828 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4831 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
4833 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
4836 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4839 static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4844 if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
4847 /* It is disabled by HW by default */
4849 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
4850 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4851 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4853 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4854 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4855 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
4858 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4861 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
4862 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4864 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4865 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4866 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
4869 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4874 static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4879 if (!(adev->cg_flags &
4880 (AMD_CG_SUPPORT_GFX_CGCG |
4881 AMD_CG_SUPPORT_GFX_CGLS |
4882 AMD_CG_SUPPORT_GFX_3D_CGCG |
4883 AMD_CG_SUPPORT_GFX_3D_CGLS)))
4887 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4889 /* unset CGCG override */
4890 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
4891 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4892 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4893 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4894 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
4895 adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4896 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4898 /* update CGCG override bits */
4900 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4902 /* enable cgcg FSM(0x0000363F) */
4903 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
4905 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
4906 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
4907 data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4908 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4911 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
4912 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
4913 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4914 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4918 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
4920 /* Program RLC_CGCG_CGLS_CTRL_3D */
4921 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
4923 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
4924 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK;
4925 data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4926 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4929 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
4930 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK;
4931 data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4932 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4936 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
4938 /* set IDLE_POLL_COUNT(0x00900100) */
4939 def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL);
4941 data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK);
4942 data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4943 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4946 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data);
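/* This recombines to the 0x00900100 named in the comment above, assuming
 * the usual 16-bit field split: IDLE_POLL_COUNT 0x0090 in the high half
 * and POLL_FREQUENCY 0x0100 in the low half.
 */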
4948 data = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4949 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
4950 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
4951 data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
4952 data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
4953 WREG32_SOC15(GC, 0, regCP_INT_CNTL, data);
4955 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
4956 data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
4957 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
4959 /* Some ASICs only have one SDMA instance, no need to configure SDMA1 */
4960 if (adev->sdma.num_instances > 1) {
4961 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
4962 data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
4963 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
4966 /* Program RLC_CGCG_CGLS_CTRL */
4967 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
4969 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
4970 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4972 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4973 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4976 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
4978 /* Program RLC_CGCG_CGLS_CTRL_3D */
4979 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
4981 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
4982 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4983 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4984 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4987 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
4989 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
4990 data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
4991 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
4993 /* Some ASICs only have one SDMA instance, no need to configure SDMA1 */
4994 if (adev->sdma.num_instances > 1) {
4995 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
4996 data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
4997 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
5002 static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5005 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5007 gfx_v11_0_update_coarse_grain_clock_gating(adev, enable);
5009 gfx_v11_0_update_medium_grain_clock_gating(adev, enable);
5011 gfx_v11_0_update_repeater_fgcg(adev, enable);
5013 gfx_v11_0_update_sram_fgcg(adev, enable);
5015 gfx_v11_0_update_perf_clk(adev, enable);
5017 if (adev->cg_flags &
5018 (AMD_CG_SUPPORT_GFX_MGCG |
5019 AMD_CG_SUPPORT_GFX_CGLS |
5020 AMD_CG_SUPPORT_GFX_CGCG |
5021 AMD_CG_SUPPORT_GFX_3D_CGCG |
5022 AMD_CG_SUPPORT_GFX_3D_CGLS))
5023 gfx_v11_0_enable_gui_idle_interrupt(adev, enable);
5025 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5030 static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
5034 amdgpu_gfx_off_ctrl(adev, false);
5036 data = RREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL);
5038 data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
5039 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
5041 WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
5043 amdgpu_gfx_off_ctrl(adev, true);
5046 static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
5047 .is_rlc_enabled = gfx_v11_0_is_rlc_enabled,
5048 .set_safe_mode = gfx_v11_0_set_safe_mode,
5049 .unset_safe_mode = gfx_v11_0_unset_safe_mode,
5050 .init = gfx_v11_0_rlc_init,
5051 .get_csb_size = gfx_v11_0_get_csb_size,
5052 .get_csb_buffer = gfx_v11_0_get_csb_buffer,
5053 .resume = gfx_v11_0_rlc_resume,
5054 .stop = gfx_v11_0_rlc_stop,
5055 .reset = gfx_v11_0_rlc_reset,
5056 .start = gfx_v11_0_rlc_start,
5057 .update_spm_vmid = gfx_v11_0_update_spm_vmid,
5060 static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
5062 u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
5064 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
5065 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5067 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5069 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);
5071 /* Program RLC_PG_DELAY_3 for CGPG hysteresis */
5072 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
5073 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5074 case IP_VERSION(11, 0, 1):
5075 case IP_VERSION(11, 0, 4):
5076 case IP_VERSION(11, 5, 0):
5077 WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
5085 static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
5087 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5089 gfx_v11_cntl_power_gating(adev, enable);
5091 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5094 static int gfx_v11_0_set_powergating_state(void *handle,
5095 enum amd_powergating_state state)
5097 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5098 bool enable = (state == AMD_PG_STATE_GATE);
5100 if (amdgpu_sriov_vf(adev))
5103 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5104 case IP_VERSION(11, 0, 0):
5105 case IP_VERSION(11, 0, 2):
5106 case IP_VERSION(11, 0, 3):
5107 amdgpu_gfx_off_ctrl(adev, enable);
5109 case IP_VERSION(11, 0, 1):
5110 case IP_VERSION(11, 0, 4):
5111 case IP_VERSION(11, 5, 0):
5113 amdgpu_gfx_off_ctrl(adev, false);
5115 gfx_v11_cntl_pg(adev, enable);
5118 amdgpu_gfx_off_ctrl(adev, true);
5128 static int gfx_v11_0_set_clockgating_state(void *handle,
5129 enum amd_clockgating_state state)
5131 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5133 if (amdgpu_sriov_vf(adev))
5136 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5137 case IP_VERSION(11, 0, 0):
5138 case IP_VERSION(11, 0, 1):
5139 case IP_VERSION(11, 0, 2):
5140 case IP_VERSION(11, 0, 3):
5141 case IP_VERSION(11, 0, 4):
5142 case IP_VERSION(11, 5, 0):
5143 gfx_v11_0_update_gfx_clock_gating(adev,
5144 state == AMD_CG_STATE_GATE);
5153 static void gfx_v11_0_get_clockgating_state(void *handle, u64 *flags)
5155 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5158 /* AMD_CG_SUPPORT_GFX_MGCG */
5159 data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5160 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5161 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
5163 /* AMD_CG_SUPPORT_REPEATER_FGCG */
5164 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
5165 *flags |= AMD_CG_SUPPORT_REPEATER_FGCG;
5167 /* AMD_CG_SUPPORT_GFX_FGCG */
5168 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
5169 *flags |= AMD_CG_SUPPORT_GFX_FGCG;
5171 /* AMD_CG_SUPPORT_GFX_PERF_CLK */
5172 if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
5173 *flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;
5175 /* AMD_CG_SUPPORT_GFX_CGCG */
5176 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5177 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5178 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
5180 /* AMD_CG_SUPPORT_GFX_CGLS */
5181 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5182 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
5184 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
5185 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5186 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5187 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5189 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
5190 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5191 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5194 static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5196 /* gfx11 has a 32-bit rptr */
5197 return *(uint32_t *)ring->rptr_cpu_addr;
5200 static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5202 struct amdgpu_device *adev = ring->adev;
5205 /* XXX check if swapping is necessary on BE */
5206 if (ring->use_doorbell) {
5207 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5209 wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR);
5210 wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32;
5216 static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5218 struct amdgpu_device *adev = ring->adev;
5220 if (ring->use_doorbell) {
5221 /* XXX check if swapping is necessary on BE */
5222 atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
5224 WDOORBELL64(ring->doorbell_index, ring->wptr);
5226 WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
5227 lower_32_bits(ring->wptr));
5228 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
5229 upper_32_bits(ring->wptr));
5233 static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5235 /* gfx11 hardware has a 32-bit rptr */
5236 return *(uint32_t *)ring->rptr_cpu_addr;
5239 static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5243 /* XXX check if swapping is necessary on BE */
5244 if (ring->use_doorbell)
5245 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5251 static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5253 struct amdgpu_device *adev = ring->adev;
5255 /* XXX check if swapping is necessary on BE */
5256 if (ring->use_doorbell) {
5257 atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
5259 WDOORBELL64(ring->doorbell_index, ring->wptr);
5261 BUG(); /* only DOORBELL method supported on gfx11 now */
5265 static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5267 struct amdgpu_device *adev = ring->adev;
5268 u32 ref_and_mask, reg_mem_engine;
5269 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
5271 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
5274 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
5277 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
5284 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
5285 reg_mem_engine = 1; /* pfp */
5288 gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5289 adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5290 adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5291 ref_and_mask, ref_and_mask, 0x20);
5294 static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5295 struct amdgpu_job *job,
5296 struct amdgpu_ib *ib,
5299 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5300 u32 header, control = 0;
5302 BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
5304 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5306 control |= ib->length_dw | (vmid << 24);
5308 if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5309 control |= INDIRECT_BUFFER_PRE_ENB(1);
5311 if (flags & AMDGPU_IB_PREEMPTED)
5312 control |= INDIRECT_BUFFER_PRE_RESUME(1);
5315 gfx_v11_0_ring_emit_de_meta(ring,
5316 (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
5319 if (ring->is_mes_queue)
5320 /* inherit vmid from mqd */
5321 control |= 0x400000;
5323 amdgpu_ring_write(ring, header);
5324 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5325 amdgpu_ring_write(ring,
5329 lower_32_bits(ib->gpu_addr));
5330 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5331 amdgpu_ring_write(ring, control);
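/* The INDIRECT_BUFFER packet emitted above is four dwords: header, IB GPU
 * address low (dword-aligned, hence the BUG_ON), address high, and a
 * control word packing length_dw with the VMID shifted to bit 24 (plus
 * preemption bits when MCBP is in use).
 */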
5334 static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5335 struct amdgpu_job *job,
5336 struct amdgpu_ib *ib,
5339 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5340 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5342 if (ring->is_mes_queue)
5343 /* inherit vmid from mqd */
5344 control |= 0x40000000;
5346 /* Currently, there is a high probability of a wave ID mismatch
5347 * between ME and GDS, leading to a hw deadlock, because ME generates
5348 * different wave IDs than the GDS expects. This situation happens
5349 * randomly when at least 5 compute pipes use GDS ordered append.
5350 * The wave IDs generated by ME are also wrong after suspend/resume.
5351 * Those are probably bugs somewhere else in the kernel driver.
5353 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5354 * GDS to 0 for this ring (me/pipe).
5356 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5357 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5358 amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
5359 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5362 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5363 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5364 amdgpu_ring_write(ring,
5368 lower_32_bits(ib->gpu_addr));
5369 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5370 amdgpu_ring_write(ring, control);
5373 static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5374 u64 seq, unsigned flags)
5376 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5377 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5379 /* RELEASE_MEM - flush caches, send int */
5380 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5381 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
5382 PACKET3_RELEASE_MEM_GCR_GL2_WB |
5383 PACKET3_RELEASE_MEM_GCR_GL2_INV |
5384 PACKET3_RELEASE_MEM_GCR_GL2_US |
5385 PACKET3_RELEASE_MEM_GCR_GL1_INV |
5386 PACKET3_RELEASE_MEM_GCR_GLV_INV |
5387 PACKET3_RELEASE_MEM_GCR_GLM_INV |
5388 PACKET3_RELEASE_MEM_GCR_GLM_WB |
5389 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
5390 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5391 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
5392 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
5393 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
5396 * the address should be Qword aligned for a 64-bit write, Dword
5397 * aligned if only the low 32 bits are written (data high is discarded)
5403 amdgpu_ring_write(ring, lower_32_bits(addr));
5404 amdgpu_ring_write(ring, upper_32_bits(addr));
5405 amdgpu_ring_write(ring, lower_32_bits(seq));
5406 amdgpu_ring_write(ring, upper_32_bits(seq));
5407 amdgpu_ring_write(ring, ring->is_mes_queue ?
5408 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
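/* DATA_SEL picks the fence payload written at addr (2 = full 64-bit seq,
 * 1 = low 32 bits only, per the alignment comment above), and INT_SEL(2)
 * appears to additionally request an interrupt once the write completes,
 * which is how AMDGPU_FENCE_FLAG_INT is honored here.
 */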
5411 static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5413 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5414 uint32_t seq = ring->fence_drv.sync_seq;
5415 uint64_t addr = ring->fence_drv.gpu_addr;
5417 gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
5418 upper_32_bits(addr), seq, 0xffffffff, 4);
5421 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
5422 uint16_t pasid, uint32_t flush_type,
5423 bool all_hub, uint8_t dst_sel)
5425 amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
5426 amdgpu_ring_write(ring,
5427 PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
5428 PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
5429 PACKET3_INVALIDATE_TLBS_PASID(pasid) |
5430 PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
5433 static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5434 unsigned vmid, uint64_t pd_addr)
5436 if (ring->is_mes_queue)
5437 gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
5439 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5441 /* compute doesn't have PFP */
5442 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5443 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5444 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5445 amdgpu_ring_write(ring, 0x0);
5449 static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5450 u64 seq, unsigned int flags)
5452 struct amdgpu_device *adev = ring->adev;
5454 /* we only allocate 32 bits for each seq wb address */
5455 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5457 /* write fence seq to the "addr" */
5458 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5459 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5460 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5461 amdgpu_ring_write(ring, lower_32_bits(addr));
5462 amdgpu_ring_write(ring, upper_32_bits(addr));
5463 amdgpu_ring_write(ring, lower_32_bits(seq));
5465 if (flags & AMDGPU_FENCE_FLAG_INT) {
5466 /* set register to trigger INT */
5467 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5468 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5469 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5470 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS));
5471 amdgpu_ring_write(ring, 0);
5472 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5476 static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
5481 dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
5482 if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5483 /* set load_global_config & load_global_uconfig */
5485 /* set load_cs_sh_regs */
5487 /* set load_per_context_state & load_gfx_sh_regs for GFX */
5491 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5492 amdgpu_ring_write(ring, dw2);
5493 amdgpu_ring_write(ring, 0);
5496 static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring,
5497 u64 shadow_va, u64 csa_va,
5498 u64 gds_va, bool init_shadow,
5501 struct amdgpu_device *adev = ring->adev;
5503 if (!adev->gfx.cp_gfx_shadow)
5506 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_Q_PREEMPTION_MODE, 7));
5507 amdgpu_ring_write(ring, lower_32_bits(shadow_va));
5508 amdgpu_ring_write(ring, upper_32_bits(shadow_va));
5509 amdgpu_ring_write(ring, lower_32_bits(gds_va));
5510 amdgpu_ring_write(ring, upper_32_bits(gds_va));
5511 amdgpu_ring_write(ring, lower_32_bits(csa_va));
5512 amdgpu_ring_write(ring, upper_32_bits(csa_va));
5513 amdgpu_ring_write(ring, shadow_va ?
5514 PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(vmid) : 0);
5515 amdgpu_ring_write(ring, init_shadow ?
5516 PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM : 0);
5519 static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
5523 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5524 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
5525 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
5526 amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
5527 ret = ring->wptr & ring->buf_mask;
5528 amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
5533 static void gfx_v11_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
5536 BUG_ON(offset > ring->buf_mask);
5537 BUG_ON(ring->ring[offset] != 0x55aa55aa);
5539 cur = (ring->wptr - 1) & ring->buf_mask;
5540 if (likely(cur > offset))
5541 ring->ring[offset] = cur - offset;
5543 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
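/* The patched value is the number of dwords between the COND_EXEC count
 * slot and the current write pointer, with a wrap-around correction.
 * E.g. with buf_mask = 0xfff (a 4096-dword ring), offset 4090 and cur 10
 * give 4096 - 4090 + 10 = 16 dwords to conditionally skip.
 */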
5546 static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
5549 struct amdgpu_device *adev = ring->adev;
5550 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
5551 struct amdgpu_ring *kiq_ring = &kiq->ring;
5552 unsigned long flags;
5554 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
5557 spin_lock_irqsave(&kiq->ring_lock, flags);
5559 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
5560 spin_unlock_irqrestore(&kiq->ring_lock, flags);
5564 /* assert preemption condition */
5565 amdgpu_ring_set_preempt_cond_exec(ring, false);
5567 /* assert IB preemption, emit the trailing fence */
5568 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
5569 ring->trail_fence_gpu_addr,
5571 amdgpu_ring_commit(kiq_ring);
5573 spin_unlock_irqrestore(&kiq->ring_lock, flags);
5575 /* poll the trailing fence */
5576 for (i = 0; i < adev->usec_timeout; i++) {
5577 if (ring->trail_seq ==
5578 le32_to_cpu(*(ring->trail_fence_cpu_addr)))
5583 if (i >= adev->usec_timeout) {
5585 DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
5588 /* deassert preemption condition */
5589 amdgpu_ring_set_preempt_cond_exec(ring, true);
5593 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
5595 struct amdgpu_device *adev = ring->adev;
5596 struct v10_de_ib_state de_payload = {0};
5597 uint64_t offset, gds_addr, de_payload_gpu_addr;
5598 void *de_payload_cpu_addr;
5601 if (ring->is_mes_queue) {
5602 offset = offsetof(struct amdgpu_mes_ctx_meta_data,
5603 gfx[0].gfx_meta_data) +
5604 offsetof(struct v10_gfx_meta_data, de_payload);
5605 de_payload_gpu_addr =
5606 amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
5607 de_payload_cpu_addr =
5608 amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
5610 offset = offsetof(struct amdgpu_mes_ctx_meta_data,
5611 gfx[0].gds_backup) +
5612 offsetof(struct v10_gfx_meta_data, de_payload);
5613 gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
5615 offset = offsetof(struct v10_gfx_meta_data, de_payload);
5616 de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
5617 de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
5619 gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
5620 AMDGPU_CSA_SIZE - adev->gds.gds_size,
5624 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5625 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5627 cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5628 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5629 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5630 WRITE_DATA_DST_SEL(8) |
5632 WRITE_DATA_CACHE_POLICY(0));
5633 amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
5634 amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
5637 amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
5638 sizeof(de_payload) >> 2);
5640 amdgpu_ring_write_multiple(ring, (void *)&de_payload,
5641 sizeof(de_payload) >> 2);
5644 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
5647 uint32_t v = secure ? FRAME_TMZ : 0;
5649 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5650 amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t reg_val_offs)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register*/
			  (5 << 8) |	/* dst: memory */
			  (1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
}
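/*
 * Write a register from the ring. The engine selection differs per ring
 * type: gfx writes through the ME with write confirmation, while the
 * KIQ issues a non-incrementing address write.
 */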
static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val)
{
	uint32_t cmd = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = (1 << 16); /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{
	gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}
static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						   uint32_t reg0, uint32_t reg1,
						   uint32_t ref, uint32_t mask)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
			       ref, mask, 0x20);
}
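/*
 * Soft recovery: issue a SQ_CMD kill targeted at the waves of the given
 * VMID so a hung shader can be terminated without a full GPU reset.
 */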
static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
					 unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t value = 0;

	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
	WREG32_SOC15(GC, 0, regSQ_CMD, value);
}
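/*
 * Enable/disable the end-of-pipe (timestamp) interrupt for a gfx ring
 * by toggling TIME_STAMP_INT_ENABLE / GENERIC0_INT_ENABLE in the
 * per-pipe CP_INT_CNTL_RING register.
 */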
static void
gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
				      uint32_t me, uint32_t pipe,
				      enum amdgpu_interrupt_state state)
{
	uint32_t cp_int_cntl, cp_int_cntl_reg;

	switch (me) {
	case 0:
		switch (pipe) {
		case 0:
			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
			break;
		case 1:
			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
		break;
	default:
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    TIME_STAMP_INT_ENABLE, 0);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    GENERIC0_INT_ENABLE, 0);
		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    TIME_STAMP_INT_ENABLE, 1);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    GENERIC0_INT_ENABLE, 1);
		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
		break;
	default:
		break;
	}
}
static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						      int me, int pipe,
						      enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */
	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
			break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
			break;
		case 2:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
			break;
		case 3:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     GENERIC0_INT_ENABLE, 0);
		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     GENERIC0_INT_ENABLE, 1);
		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}
static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
		break;
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	default:
		break;
	}
	return 0;
}
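/*
 * EOP interrupt handler: route the fence writeback either to the MES
 * queue that signalled it or, for legacy rings, to the gfx/compute ring
 * decoded from the ring_id in the IV entry.
 */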
static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	uint32_t mes_queue_id = entry->src_data[0];

	DRM_DEBUG("IH: CP EOP\n");

	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
		struct amdgpu_mes_queue *queue;

		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;

		spin_lock(&adev->mes.queue_id_lock);
		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
		if (queue) {
			DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
			amdgpu_fence_process(queue->ring);
		}
		spin_unlock(&adev->mes.queue_id_lock);
	} else {
		me_id = (entry->ring_id & 0x0c) >> 2;
		pipe_id = (entry->ring_id & 0x03) >> 0;
		queue_id = (entry->ring_id & 0x70) >> 4;

		switch (me_id) {
		case 0:
			if (pipe_id == 0)
				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
			else
				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
			break;
		case 1:
		case 2:
			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
				ring = &adev->gfx.compute_ring[i];
				/* Per-queue interrupt is supported for MEC starting from VI.
				 * The interrupt can only be enabled/disabled per pipe instead
				 * of per queue.
				 */
				if ((ring->me == me_id) &&
				    (ring->pipe == pipe_id) &&
				    (ring->queue == queue_id))
					amdgpu_fence_process(ring);
			}
			break;
		}
	}

	return 0;
}
static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
				      PRIV_REG_INT_ENABLE,
				      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}
static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned type,
					       enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
				      PRIV_INSTR_INT_ENABLE,
				      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}
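/*
 * Common handler for privileged register/instruction faults: report a
 * scheduler fault on every ring that matches the me/pipe (and queue for
 * compute) decoded from the IV entry, so the offending job is killed.
 */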
static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
					struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			/* we only enabled 1 gfx queue per pipe for now */
			if (ring->me == me_id && ring->pipe == pipe_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	default:
		BUG();
		break;
	}
}
static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v11_0_handle_priv_fault(adev, entry);
	return 0;
}
static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v11_0_handle_priv_fault(adev, entry);
	return 0;
}
static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq)
		return adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry);

	return -EINVAL;
}
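/*
 * The KIQ only uses the GENERIC2 interrupt; enable/disable it both in
 * the global CPC_INT_CNTL and in the INT_CNTL register of the MEC pipe
 * the KIQ ring lives on.
 */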
static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	uint32_t tmp, target;
	struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);

	target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
	target += ring->pipe;

	switch (type) {
	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
		if (state == AMDGPU_IRQ_STATE_DISABLE) {
			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);

			tmp = RREG32_SOC15_IP(GC, target);
			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15_IP(GC, target, tmp);
		} else {
			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);

			tmp = RREG32_SOC15_IP(GC, target);
			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15_IP(GC, target, tmp);
		}
		break;
	default:
		BUG(); /* kiq only support GENERIC2_INT now */
		break;
	}
	return 0;
}
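/*
 * Emit an ACQUIRE_MEM that invalidates (and where applicable writes
 * back) the GL2, GLM, GL1, GLV, GLK and GLI caches over the full
 * address range; used as the ring's memory-sync hook.
 */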
static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
{
	const unsigned int gcr_cntl =
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);

	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
	.name = "gfx_v11_0",
	.early_init = gfx_v11_0_early_init,
	.late_init = gfx_v11_0_late_init,
	.sw_init = gfx_v11_0_sw_init,
	.sw_fini = gfx_v11_0_sw_fini,
	.hw_init = gfx_v11_0_hw_init,
	.hw_fini = gfx_v11_0_hw_fini,
	.suspend = gfx_v11_0_suspend,
	.resume = gfx_v11_0_resume,
	.is_idle = gfx_v11_0_is_idle,
	.wait_for_idle = gfx_v11_0_wait_for_idle,
	.soft_reset = gfx_v11_0_soft_reset,
	.check_soft_reset = gfx_v11_0_check_soft_reset,
	.post_soft_reset = gfx_v11_0_post_soft_reset,
	.set_clockgating_state = gfx_v11_0_set_clockgating_state,
	.set_powergating_state = gfx_v11_0_set_powergating_state,
	.get_clockgating_state = gfx_v11_0_get_clockgating_state,
};
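/*
 * Ring function tables: one vtable per ring type (gfx, compute, KIQ)
 * plugging the packet-emission helpers above into the common amdgpu
 * ring layer.
 */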
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v11_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 242 maximum if 16 IBs */
		5 + /* COND_EXEC */
		9 + /* SET_Q_PREEMPTION_MODE */
		7 + /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* VM_FLUSH */
		8 + /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		5 + /* COND_EXEC */
		7 + /* HDP_flush */
		4 + /* VGT_flush */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		8, /* gfx_v11_0_emit_mem_sync */
	.emit_ib_size =	4, /* gfx_v11_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v11_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v11_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
	.emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow,
	.init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v11_0_ring_emit_patch_cond_exec,
	.preempt_ib = gfx_v11_0_ring_preempt_ib,
	.emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v11_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v11_0_ring_emit_gds_switch */
		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v11_0_ring_emit_vm_flush */
		8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
		8, /* gfx_v11_0_emit_mem_sync */
	.emit_ib_size =	7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
	.emit_fence = gfx_v11_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v11_0_ring_emit_gds_switch */
		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v11_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size =	7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
	.emit_fence = gfx_v11_0_ring_emit_fence_kiq,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v11_0_ring_emit_rreg,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
};
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute;
}
static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = {
	.set = gfx_v11_0_set_eop_interrupt_state,
	.process = gfx_v11_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = {
	.set = gfx_v11_0_set_priv_reg_fault_state,
	.process = gfx_v11_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
	.set = gfx_v11_0_set_priv_inst_fault_state,
	.process = gfx_v11_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
	.process = gfx_v11_0_rlc_gc_fed_irq,
};
static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;

	adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
	adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;
}
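/* APUs boot the IMU in mission mode; dGPUs use debug mode here. */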
static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		adev->gfx.imu.mode = MISSION_MODE;
	else
		adev->gfx.imu.mode = DEBUG_MODE;

	adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs;
}
static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs;
}
static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev)
{
	unsigned total_cu = adev->gfx.config.max_cu_per_sh *
		adev->gfx.config.max_sh_per_se *
		adev->gfx.config.max_shader_engines;

	adev->gds.gds_size = 0x1000;
	adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}
static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev)
{
	/* set gfx eng mqd */
	adev->mqds[AMDGPU_HW_IP_GFX].mqd_size =
		sizeof(struct v11_gfx_mqd);
	adev->mqds[AMDGPU_HW_IP_GFX].init_mqd =
		gfx_v11_0_gfx_mqd_init;
	/* set compute eng mqd */
	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
		sizeof(struct v11_compute_mqd);
	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
		gfx_v11_0_compute_mqd_init;
}
static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
							  u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;

	WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
}
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 data, wgp_bitmask;

	data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;

	wgp_bitmask =
		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);

	return (~data) & wgp_bitmask;
}
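/*
 * Expand the WGP bitmap into a CU bitmap: on gfx11 CUs are paired into
 * work-group processors, so each active WGP contributes two CU bits.
 */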
static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 wgp_idx, wgp_active_bitmap;
	u32 cu_bitmap_per_wgp, cu_active_bitmap;

	wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev);
	cu_active_bitmap = 0;

	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
		/* if there is one WGP enabled, it means 2 CUs will be enabled */
		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
		if (wgp_active_bitmap & (1 << wgp_idx))
			cu_active_bitmap |= cu_bitmap_per_wgp;
	}

	return cu_active_bitmap;
}
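/*
 * Populate cu_info with the active CU bitmap per SE/SH and the total
 * active CU count, honoring any CUs disabled on the kernel command line
 * (amdgpu_gfx_parse_disable_cu).
 */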
static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap;
	unsigned disable_masks[8 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			bitmap = i * adev->gfx.config.max_sh_per_se + j;
			if (!((gfx_v11_0_get_sa_active_bitmap(adev) >> bitmap) & 1))
				continue;
			mask = 1;
			counter = 0;
			gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			if (i < 8 && j < 2)
				gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev);

			/*
			 * GFX11 can support more than 4 SEs, while the bitmap
			 * in the cu_info struct is 4x4 and the ioctl interface
			 * struct drm_amdgpu_info_device must stay stable.
			 * So we use the last two columns of the bitmap to store
			 * the CU masks for SEs 4 to 7; the layout of the bitmap
			 * is as below:
			 * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
			 * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
			 * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
			 * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
			 * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
			 * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
			 * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
			 * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
			 */
			cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask)
					counter++;

				mask <<= 1;
			}
			active_cu_number += counter;
		}
	}
	gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}
const struct amdgpu_ip_block_version gfx_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v11_0_ip_funcs,
};