/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "amdgpu_atomfirmware.h"
#include "imu_v11_0.h"
#include "soc21.h"
#include "nvd.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "smuio/smuio_13_0_6_offset.h"
#include "smuio/smuio_13_0_6_sh_mask.h"
#include "navi10_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "soc15.h"
#include "soc15d.h"
#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"
#define GFX11_NUM_GFX_RINGS		1
#define GFX11_MEC_HPD_SIZE		2048

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1	0x1388

#define regCGTT_WD_CLK_CTRL		0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX	1
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1	0x4e7e
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX	1
MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
};

#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev);
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev);
static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance, int xcc_id);
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val);
static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
				      bool enable);
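/* KIQ (Kernel Interface Queue) PM4 helpers: the driver uses the KIQ ring to
 * ask CP firmware to map/unmap the gfx and compute queues, query their
 * status and invalidate TLBs on its behalf. Each helper below emits the
 * corresponding PM4 packet onto the KIQ ring.
 */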
static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}
static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t me = 0, eng_sel = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_COMPUTE:
		me = 1;
		eng_sel = 0;
		break;
	case AMDGPU_RING_TYPE_GFX:
		me = 0;
		eng_sel = 4;
		break;
	case AMDGPU_RING_TYPE_MES:
		me = 2;
		eng_sel = 5;
		break;
	default:
		WARN_ON(1);
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1 */
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((me)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}
static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}
static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				      uint16_t pasid, uint32_t flush_type,
				      bool all_hub)
{
	gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}
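/* The *_size fields below are the lengths, in dwords, of the packets the
 * helpers above emit (PACKET3 header included); e.g. gfx11_kiq_set_resources()
 * writes 8 dwords, so set_resources_size is 8. The KIQ layer uses these to
 * reserve ring space before emitting.
 */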
static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx11_kiq_set_resources,
	.kiq_map_queues = gfx11_kiq_map_queues,
	.kiq_unmap_queues = gfx11_kiq_unmap_queues,
	.kiq_query_status = gfx11_kiq_query_status,
	.kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs;
}
static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
		break;
	default:
		break;
	}
}
static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) | /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}
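/* Basic ring sanity test: seed SCRATCH_REG0 with 0xCAFEDEAD, submit a packet
 * that writes 0xDEADBEEF to it (WREG via the KIQ path, SET_UCONFIG_REG
 * otherwise), then poll the register until the value lands or usec_timeout
 * expires.
 */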
static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
		gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
	} else {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
		amdgpu_ring_write(ring, scratch -
				  PACKET3_SET_UCONFIG_REG_START);
		amdgpu_ring_write(ring, 0xDEADBEEF);
	}
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}
static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_ptr;
	long r;

	/* MES KIQ fw hasn't indirect buffer support for now */
	if (adev->enable_mes_kiq &&
	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		return 0;

	memset(&ib, 0, sizeof(ib));
	if (ring->is_mes_queue) {
		uint32_t padding, offset;

		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
		padding = amdgpu_mes_ctx_get_offs(ring,
						  AMDGPU_MES_CTX_PADDING_OFFS);
		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
		*cpu_ptr = cpu_to_le32(0xCAFEDEAD);
	} else {
		r = amdgpu_device_wb_get(adev, &index);
		if (r)
			return r;

		gpu_addr = adev->wb.gpu_addr + (index * 4);
		adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
		cpu_ptr = &adev->wb.wb[index];
	}

	r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}
	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	if (!ring->is_mes_queue)
		amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	if (!ring->is_mes_queue)
		amdgpu_device_wb_free(adev, index);
	return r;
}
static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);

	kfree(adev->gfx.rlc.register_list_format);
}
static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
{
	const struct psp_firmware_header_v1_0 *toc_hdr;
	int err = 0;
	char fw_name[40];

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
	if (err)
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	amdgpu_ucode_release(&adev->psp.toc_fw);
	return err;
}
static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		if ((adev->gfx.me_fw_version >= 1505) &&
		    (adev->gfx.pfp_fw_version >= 1600) &&
		    (adev->gfx.mec_fw_version >= 512)) {
			if (amdgpu_sriov_vf(adev))
				adev->gfx.cp_gfx_shadow = true;
			else
				adev->gfx.cp_gfx_shadow = false;
		}
		break;
	default:
		adev->gfx.cp_gfx_shadow = false;
		break;
	}
}
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
	char fw_name[40];
	char ucode_prefix[30];
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
	if (err)
		goto out;
	/* check pfp fw hdr version to decide if enable rs64 for gfx11. */
	adev->gfx.rs64_enable = amdgpu_ucode_hdr_version(
				(union amdgpu_firmware_header *)
				adev->gfx.pfp_fw->data, 2, 0);
	if (adev->gfx.rs64_enable) {
		dev_info(adev->dev, "CP RS64 enable\n");
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
	}

	if (!amdgpu_sriov_vf(adev)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
		if (err)
			goto out;
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
		if (err)
			goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix);

	/* only one MEC for gfx 11.0.0. */
	adev->gfx.mec2_fw = NULL;

	gfx_v11_0_check_fw_cp_gfx_shadow(adev);
out:
	if (err) {
		amdgpu_ucode_release(&adev->gfx.pfp_fw);
		amdgpu_ucode_release(&adev->gfx.me_fw);
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	}

	return err;
}
static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}
static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, i;
	int ctx_reg_offset;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}
static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}
static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx11_cs_data;

	cs_data = adev->gfx.rlc.cs_data;
	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);

	return 0;
}
static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}

static void gfx_v11_0_me_init(struct amdgpu_device *adev)
{
	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);
}
static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE;

	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
			gfx_v11_0_mec_fini(adev);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}
static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* in gfx11 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * 0 here */
	WARN_ON(simd != 0);

	/* type 3 wave data */
	dst[(*no_fields)++] = 3;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
}
static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}
static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc21_grbm_select(adev, me, pipe, q, vm);
}
/* all sizes are in bytes */
#define MQD_SHADOW_BASE_SIZE      73728
#define MQD_SHADOW_BASE_ALIGNMENT 256
#define MQD_FWWORKAREA_SIZE       484
#define MQD_FWWORKAREA_ALIGNMENT  256

static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
					 struct amdgpu_gfx_shadow_info *shadow_info)
{
	if (adev->gfx.cp_gfx_shadow) {
		shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
		shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
		shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
		shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
		return 0;
	}

	memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
	return -ENOTSUPP;
}
static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v11_0_select_se_sh,
	.read_wave_data = &gfx_v11_0_read_wave_data,
	.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
	.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
	.get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
};
static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 3):
		adev->gfx.ras = &gfx_v11_0_3_ras;
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300;
		break;
	default:
		BUG();
		break;
	}

	return 0;
}
static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	int r;
	struct amdgpu_ring *ring;
	unsigned int irq_type;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;
	return 0;
}
static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int mec, int pipe, int queue)
{
	int r;
	unsigned int irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX11_MEC_HPD_SIZE);
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;
}
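/* The RLC TOC shipped in the toc firmware describes where each firmware image
 * must be placed in the autoload buffer; offsets and sizes are stored in the
 * TOC in dwords and converted to bytes when cached here.
 */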
static struct {
	SOC21_FIRMWARE_ID	id;
	unsigned int		offset;
	unsigned int		size;
} rlc_autoload_info[SOC21_FIRMWARE_ID_MAX];

static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
{
	RLC_TABLE_OF_CONTENT *ucode = rlc_toc;

	while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) &&
	       (ucode->id < SOC21_FIRMWARE_ID_MAX)) {
		rlc_autoload_info[ucode->id].id = ucode->id;
		rlc_autoload_info[ucode->id].offset = ucode->offset * 4;
		rlc_autoload_info[ucode->id].size = ucode->size * 4;

		ucode++;
	}
}
static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	SOC21_FIRMWARE_ID id;

	gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);

	for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* In case the offset in rlc toc ucode is aligned */
	if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX - 1].offset)
		total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX - 1].offset +
			     rlc_autoload_info[SOC21_FIRMWARE_ID_MAX - 1].size;

	return total_size;
}
static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v11_0_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}
static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
						       SOC21_FIRMWARE_ID id,
						       const void *fw_data,
						       uint32_t fw_size,
						       uint32_t *fw_autoload_mask)
{
	uint32_t toc_offset;
	uint32_t toc_fw_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;

	if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;

	if (fw_size == 0)
		fw_size = toc_fw_size;

	if (fw_size > toc_fw_size)
		fw_size = toc_fw_size;

	memcpy(ptr + toc_offset, fw_data, fw_size);

	if (fw_size < toc_fw_size)
		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);

	if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
		*(uint64_t *)fw_autoload_mask |= 1ULL << id;
}
static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	void *data;
	uint32_t size;
	uint64_t *toc_ptr;

	*(uint64_t *)fw_autoload_mask |= 0x1;

	DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask);

	data = adev->psp.toc.start_addr;
	size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size;

	toc_ptr = (uint64_t *)data + size / 8 - 1;
	*toc_ptr = *(uint64_t *)fw_autoload_mask;

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC,
						   data, size, fw_autoload_mask);
}
static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
	uint16_t version_major, version_minor;

	if (adev->gfx.rs64_enable) {
		/* pfp ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP,
				fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK,
				fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK,
				fw_data, fw_size, fw_autoload_mask);
		/* me ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME,
				fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK,
				fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK,
				fw_data, fw_size, fw_autoload_mask);
		/* mec ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		/* instruction */
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC,
				fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK,
				fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK,
				fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK,
				fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK,
				fw_data, fw_size, fw_autoload_mask);
	} else {
		/* pfp ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.pfp_fw->data;
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP,
				fw_data, fw_size, fw_autoload_mask);

		/* me ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.me_fw->data;
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME,
				fw_data, fw_size, fw_autoload_mask);

		/* mec ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			cp_hdr->jt_size * 4;
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC,
				fw_data, fw_size, fw_autoload_mask);
	}

	/* rlc ucode */
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
		adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE,
			fw_data, fw_size, fw_autoload_mask);

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2) {
		if (version_minor >= 2) {
			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE,
					fw_data, fw_size, fw_autoload_mask);

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT,
					fw_data, fw_size, fw_autoload_mask);
		}
	}
}
static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev,
							    uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v2_0 *sdma_hdr;

	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
		adev->sdma.instance[0].fw->data;
	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask);

	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->ctl_ucode_offset));
	fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, fw_autoload_mask);
}
static void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	int pipe, ucode_id, data_id;

	for (pipe = 0; pipe < 2; pipe++) {
		if (pipe == 0) {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0;
			data_id = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK;
		} else {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1;
			data_id = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK;
		}

		mes_hdr = (const struct mes_firmware_header_v1_0 *)
			adev->mes.fw[pipe]->data;

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				ucode_id, fw_data, fw_size, fw_autoload_mask);

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				data_id, fw_data, fw_size, fw_autoload_mask);
	}
}
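/* Backdoor autoload: with AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO the driver copies
 * the SDMA, CP/gfx, MES and TOC images into a single buffer laid out per the
 * TOC, points the IMU bootloader registers at the RLC_G image, then lets the
 * IMU load and start the RLC, which in turn loads the remaining firmwares.
 */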
static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
{
	uint32_t rlc_g_offset, rlc_g_size;
	uint64_t gpu_addr;
	uint32_t autoload_fw_id[2];

	memset(autoload_fw_id, 0, sizeof(uint32_t) * 2);

	/* RLC autoload sequence 2: copy ucode */
	gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id);

	rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);

	/* RLC autoload sequence 3: load IMU fw */
	if (adev->gfx.imu.funcs->load_microcode)
		adev->gfx.imu.funcs->load_microcode(adev);
	/* RLC autoload sequence 4 init IMU fw */
	if (adev->gfx.imu.funcs->setup_imu)
		adev->gfx.imu.funcs->setup_imu(adev);
	if (adev->gfx.imu.funcs->start_imu)
		adev->gfx.imu.funcs->start_imu(adev);

	/* RLC autoload sequence 5 disable gpa mode */
	gfx_v11_0_disable_gpa_mode(adev);

	return 0;
}
static int gfx_v11_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id = 0;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	default:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	}

	/* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) &&
	    amdgpu_sriov_is_pp_one_vf(adev))
		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	/* FED error */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
			      &adev->gfx.rlc_gc_fed_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	if (adev->gfx.imu.funcs) {
		if (adev->gfx.imu.funcs->init_microcode) {
			r = adev->gfx.imu.funcs->init_microcode(adev);
			if (r)
				DRM_ERROR("Failed to load imu firmware!\n");
		}
	}

	gfx_v11_0_me_init(adev);

	r = gfx_v11_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v11_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v11_0_gfx_ring_init(adev, ring_id,
							    i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}

	ring_id = 0;
	/* set up the compute queues - allocate horizontally across pipes */
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
								     k, j))
					continue;

				r = gfx_v11_0_compute_ring_init(adev, ring_id,
								i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	if (!adev->enable_mes_kiq) {
		r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		kiq = &adev->gfx.kiq[0];
		r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
		if (r)
			return r;
	}

	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd), 0);
	if (r)
		return r;

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v11_0_rlc_autoload_buffer_init(adev);
		if (r)
			return r;
	}

	r = gfx_v11_0_gpu_early_init(adev);
	if (r)
		return r;

	if (amdgpu_gfx_ras_sw_init(adev)) {
		dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
		return -EINVAL;
	}

	return 0;
}
static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
			      &adev->gfx.pfp.pfp_fw_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_ptr);

	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj,
			      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
}

static void gfx_v11_0_me_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
			      &adev->gfx.me.me_fw_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_ptr);

	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj,
			      &adev->gfx.me.me_fw_data_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_data_ptr);
}

static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
			      &adev->gfx.rlc.rlc_autoload_gpu_addr,
			      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
}
static int gfx_v11_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_mqd_sw_fini(adev, 0);

	if (!adev->enable_mes_kiq) {
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
		amdgpu_gfx_kiq_fini(adev, 0);
	}

	gfx_v11_0_pfp_fini(adev);
	gfx_v11_0_me_fini(adev);
	gfx_v11_0_rlc_fini(adev);
	gfx_v11_0_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v11_0_rlc_autoload_buffer_fini(adev);

	gfx_v11_0_free_microcode(adev);

	return 0;
}
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
}
static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev)
{
	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;

	gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE);
	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
					    CC_GC_SA_UNIT_DISABLE,
					    SA_DISABLE);
	gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE);
	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
						 GC_USER_SA_UNIT_DISABLE,
						 SA_DISABLE);

	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
					    adev->gfx.config.max_shader_engines);

	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
}
static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
	u32 rb_mask;

	gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
					    CC_RB_BACKEND_DISABLE,
					    BACKEND_DISABLE);
	gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
						 GC_USER_RB_BACKEND_DISABLE,
						 BACKEND_DISABLE);

	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
					    adev->gfx.config.max_shader_engines);

	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
}
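/* Derive the usable render-backend (RB) mask from the shader-array (SA)
 * fuse mask and the RB disable fuses read above: each active SA contributes
 * rb_bitmap_width_per_sa RBs to the enable mask.
 */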
static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
{
	u32 rb_bitmap_width_per_sa;
	u32 max_sa;
	u32 active_sa_bitmap;
	u32 global_active_rb_bitmap;
	u32 active_rb_bitmap = 0;
	u32 i;

	/* query sa bitmap from SA_UNIT_DISABLE registers */
	active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev);
	/* query rb bitmap from RB_BACKEND_DISABLE registers */
	global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev);

	/* generate active rb bitmap according to active sa bitmap */
	max_sa = adev->gfx.config.max_shader_engines *
		 adev->gfx.config.max_sh_per_se;
	rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
				 adev->gfx.config.max_sh_per_se;
	for (i = 0; i < max_sa; i++) {
		if (active_sa_bitmap & (1 << i))
			active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
	}

	active_rb_bitmap |= global_active_rb_bitmap;
	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define LDS_APP_BASE		0x1
#define SCRATCH_APP_BASE	0x2

static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
			SCRATCH_APP_BASE;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc21_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data);
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs. */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0);
	}
}
static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < 16; vmid++) {
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0);
	}
}

static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev)
{
	/* TODO: harvest feature to be added later. */
}
static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev)
{
	/* TCCs are global (not instanced). */
	uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) |
			       RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE);

	adev->gfx.config.tcc_disabled_mask =
		REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
		(REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
}
static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	if (!amdgpu_sriov_vf(adev))
		WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v11_0_setup_rb(adev);
	gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info);
	gfx_v11_0_get_tcc_info(adev);
	adev->gfx.config.pa_sc_tile_steering_override = 0;

	/* Set whether texture coordinate truncation is conformant. */
	tmp = RREG32_SOC15(GC, 0, regTA_CNTL2);
	adev->gfx.config.ta_cntl2_truncate_coord_mode =
		REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc21_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);

		tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
				    (adev->gmc.private_aperture_start >> 48));
		tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
				    (adev->gmc.shared_aperture_start >> 48));
		WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp);
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v11_0_init_compute_vmid(adev);
	gfx_v11_0_init_gds_vmid(adev);
}
static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	u32 tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
			    enable ? 1 : 0);

	WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
}
static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);

	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI,
			adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO,
			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);

	return 0;
}
static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);

	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
	WREG32_SOC15(GC, 0, regRLC_CNTL, tmp);
}

static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}
static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t rlc_pg_cntl;

	rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);

	if (!enable) {
		/* RLC_PG_CNTL[23] = 0 (default)
		 * RLC will wait for handshake acks with SMU
		 * GFXOFF will be enabled
		 * RLC_PG_CNTL[23] = 1
		 * RLC will not issue any message to SMU
		 * hence no handshake between SMU & RLC
		 * GFXOFF will be disabled
		 */
		rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	} else {
		rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	}
	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl);
}
static void gfx_v11_0_rlc_start(struct amdgpu_device *adev)
{
	/* TODO: enable rlc & smu handshake until smu
	 * and gfxoff feature works as expected */
	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
		gfx_v11_0_rlc_smu_handshake_cntl(adev, false);

	WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
	udelay(50);
}
static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* enable Save Restore Machine */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL));
	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
	WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp);
}

static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
		     RLCG_UCODE_LOADING_START_ADDRESS);

	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA,
			     le32_to_cpup(fw_data++));

	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
}

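/*
 * Note on the upload protocol above: RLC_GPM_UCODE_ADDR/_DATA behave as an
 * auto-incrementing indexed pair, so the start offset is written once and
 * the whole image is then streamed one dword at a time through the DATA
 * register. The trailing write of adev->gfx.rlc_fw_version to UCODE_ADDR
 * mirrors the other backdoor loaders in this file; presumably the RLC
 * treats it as an end-of-load marker rather than a real offset.
 */
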
static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_2 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	u32 tmp;

	hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
	fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0);

	for (i = 0; i < fw_size; i++) {
		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
			msleep(1); /* Wait for RLC firmware to settle */
		WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA,
			     le32_to_cpup(fw_data++));
	}

	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
	fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0);
	for (i = 0; i < fw_size; i++) {
		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
			msleep(1); /* Wait for RLC firmware to settle */
		WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA,
			     le32_to_cpup(fw_data++));
	}

	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);

	tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL);
	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
	WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp);
}

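/*
 * Ordering note for the LX6 loader above: both the IRAM and DRAM images are
 * written while the LX6 core is still held in reset; only after both uploads
 * does the final RLC_LX6_CNTL write clear BRESET (and set PDEBUG_ENABLE),
 * letting the core start fetching from the freshly loaded memories.
 */
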
static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_3 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	u32 tmp;

	hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->rlcp_ucode_offset_bytes));
	fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0);

	for (i = 0; i < fw_size; i++) {
		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
			msleep(1); /* Wait for RLC firmware to settle */
		WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA,
			     le32_to_cpup(fw_data++));
	}

	WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version);

	tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE);
	tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
	WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->rlcv_ucode_offset_bytes));
	fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0);

	for (i = 0; i < fw_size; i++) {
		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
			msleep(1); /* Wait for RLC firmware to settle */
		WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA,
			     le32_to_cpup(fw_data++));
	}

	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version);

	tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL);
	tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1);
	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp);
}

static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	uint16_t version_major;
	uint16_t version_minor;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);

	if (version_major == 2) {
		gfx_v11_0_load_rlcg_microcode(adev);
		if (amdgpu_dpm == 1) {
			if (version_minor >= 2)
				gfx_v11_0_load_rlc_iram_dram_microcode(adev);
			if (version_minor == 3)
				gfx_v11_0_load_rlcp_rlcv_microcode(adev);
		}

		return 0;
	}

	return -EINVAL;
}

static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev)
{
	int r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		gfx_v11_0_init_csb(adev);

		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
			gfx_v11_0_rlc_enable_srm(adev);
	} else {
		if (amdgpu_sriov_vf(adev)) {
			gfx_v11_0_init_csb(adev);
			return 0;
		}

		adev->gfx.rlc.funcs->stop(adev);

		/* disable CG */
		WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);

		/* disable PG */
		WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			/* legacy rlc firmware loading */
			r = gfx_v11_0_rlc_load_microcode(adev);
			if (r)
				return r;
		}

		gfx_v11_0_init_csb(adev);

		adev->gfx.rlc.funcs->start(adev);
	}

	return 0;
}

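/*
 * To summarize the resume paths above: with PSP front-door loading (and
 * under SR-IOV) the RLC is already running, so only the clear-state buffer
 * and, on bare metal, the save/restore machine need to be programmed. The
 * legacy path stops the RLC, clears clock- and power-gating state, pushes
 * the microcode through the backdoor registers for AMDGPU_FW_LOAD_DIRECT,
 * and only then restarts the RLC.
 */
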
static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
{
	uint32_t usec_timeout = 50000;  /* wait for 50ms */
	uint32_t tmp;
	int i;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->hdp.funcs->flush_hdp(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);

	/* Program me ucode address into instruction cache address register */
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}

static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
{
	uint32_t usec_timeout = 50000;  /* wait for 50ms */
	uint32_t tmp;
	int i;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->hdp.funcs->flush_hdp(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);

	/* Program pfp ucode address into instruction cache address register */
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}

static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
{
	uint32_t usec_timeout = 50000;  /* wait for 50ms */
	uint32_t tmp;
	int i;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);

	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->hdp.funcs->flush_hdp(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);

	/* Program mec1 ucode address into instruction cache address register */
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}

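/*
 * The three legacy helpers above (ME, PFP, MEC1) are instances of one idiom:
 * kick an invalidation through *_IC_OP_CNTL, busy-poll the matching
 * INVALIDATE_CACHE_COMPLETE field in 1 us steps for up to 50 ms, then point
 * the L1 instruction cache at the 4 KB-aligned ucode base (hence the
 * "& 0xFFFFF000" on the low dword). Only the register block names differ.
 */
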
static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
{
	uint32_t usec_timeout = 50000;  /* wait for 50ms */
	uint32_t tmp;
	unsigned i, pipe_id;
	const struct gfx_firmware_header_v2_0 *pfp_hdr;

	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.pfp_fw->data;

	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
		     lower_32_bits(addr));
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
		     upper_32_bits(addr));

	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);

	/*
	 * Programming any of the CP_PFP_IC_BASE registers
	 * forces invalidation of the ME L1 I$. Wait for the
	 * invalidation to complete.
	 */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Prime the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
	/* Waiting for cache primed */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				       ICACHE_PRIMED))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to prime instruction cache\n");
		return -EINVAL;
	}

	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
			     (pfp_hdr->ucode_start_addr_hi << 30) |
			     (pfp_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
			     pfp_hdr->ucode_start_addr_hi >> 2);

		/*
		 * Program CP_ME_CNTL to reset given PIPE to take
		 * effect of CP_PFP_PRGRM_CNTR_START.
		 */
		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE0_RESET, 1);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE1_RESET, 1);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		/* Clear pfp pipe reset bit. */
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE0_RESET, 0);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE1_RESET, 0);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
			     lower_32_bits(addr2));
		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
			     upper_32_bits(addr2));
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);

	/* Invalidate the data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
				       INVALIDATE_DCACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	return 0;
}

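/*
 * The PRGRM_CNTR_START arithmetic above packs the 64-bit RS64 entry point
 * into two registers: ucode_start_addr_lo >> 2 supplies the dword-aligned
 * low bits and the bottom two bits of ucode_start_addr_hi fill the top of
 * CP_PFP_PRGRM_CNTR_START (the << 30), while the remaining high bits land
 * in CP_PFP_PRGRM_CNTR_START_HI (the >> 2).
 */
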
static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
{
	uint32_t usec_timeout = 50000;  /* wait for 50ms */
	uint32_t tmp;
	unsigned i, pipe_id;
	const struct gfx_firmware_header_v2_0 *me_hdr;

	me_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.me_fw->data;

	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
		     lower_32_bits(addr));
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
		     upper_32_bits(addr));

	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);

	/*
	 * Programming any of the CP_ME_IC_BASE registers
	 * forces invalidation of the ME L1 I$. Wait for the
	 * invalidation to complete.
	 */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Prime the instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);

	/* Waiting for instruction cache primed */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				       ICACHE_PRIMED))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to prime instruction cache\n");
		return -EINVAL;
	}

	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
			     (me_hdr->ucode_start_addr_hi << 30) |
			     (me_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
			     me_hdr->ucode_start_addr_hi >> 2);

		/*
		 * Program CP_ME_CNTL to reset given PIPE to take
		 * effect of CP_ME_PRGRM_CNTR_START.
		 */
		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE0_RESET, 1);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE1_RESET, 1);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		/* Clear me pipe reset bit. */
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE0_RESET, 0);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE1_RESET, 0);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
			     lower_32_bits(addr2));
		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
			     upper_32_bits(addr2));
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);

	/* Invalidate the data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
				       INVALIDATE_DCACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	return 0;
}

static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
{
	uint32_t usec_timeout = 50000;  /* wait for 50ms */
	uint32_t tmp;
	unsigned i;
	const struct gfx_firmware_header_v2_0 *mec_hdr;

	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.mec_fw->data;

	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);

	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
		soc21_grbm_select(adev, 1, i, 0, 0);

		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, lower_32_bits(addr2));
		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
			     upper_32_bits(addr2));

		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
			     mec_hdr->ucode_start_addr_lo >> 2 |
			     mec_hdr->ucode_start_addr_hi << 30);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
			     mec_hdr->ucode_start_addr_hi >> 2);

		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, lower_32_bits(addr));
		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
			     upper_32_bits(addr));
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Trigger an invalidation of the L1 data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
				       INVALIDATE_DCACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	return 0;
}

static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v2_0 *pfp_hdr;
	const struct gfx_firmware_header_v2_0 *me_hdr;
	const struct gfx_firmware_header_v2_0 *mec_hdr;
	uint32_t pipe_id, tmp;

	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.mec_fw->data;
	me_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.me_fw->data;
	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.pfp_fw->data;

	/* config pfp program start addr */
	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
			     (pfp_hdr->ucode_start_addr_hi << 30) |
			     (pfp_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
			     pfp_hdr->ucode_start_addr_hi >> 2);
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);

	/* reset pfp pipe */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	/* clear pfp pipe reset */
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	/* config me program start addr */
	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
			     (me_hdr->ucode_start_addr_hi << 30) |
			     (me_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
			     me_hdr->ucode_start_addr_hi >> 2);
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);

	/* reset me pipe */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	/* clear me pipe reset */
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	/* config mec program start addr */
	for (pipe_id = 0; pipe_id < 4; pipe_id++) {
		soc21_grbm_select(adev, 1, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
			     mec_hdr->ucode_start_addr_lo >> 2 |
			     mec_hdr->ucode_start_addr_hi << 30);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
			     mec_hdr->ucode_start_addr_hi >> 2);
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);

	/* reset mec pipe */
	tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);

	/* clear mec pipe reset */
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
}

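/*
 * The reset/clear toggling above follows the pattern the per-function cache
 * helpers use: the new PRGRM_CNTR_START values only take effect when the
 * corresponding PFP/ME/MEC pipe comes out of reset, so each pipe's reset bit
 * is asserted and immediately deasserted after the start addresses have been
 * programmed.
 */
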
static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
{
	uint32_t cp_status;
	uint32_t bootload_status;
	int i, r;
	uint64_t addr, addr2;

	for (i = 0; i < adev->usec_timeout; i++) {
		cp_status = RREG32_SOC15(GC, 0, regCP_STAT);

		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 1) ||
		    adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 4))
			bootload_status = RREG32_SOC15(GC, 0,
					regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
		else
			bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS);

		if ((cp_status == 0) &&
		    (REG_GET_FIELD(bootload_status,
			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
			break;
		}
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
		return -ETIMEDOUT;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		if (adev->gfx.rs64_enable) {
			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset;
			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset;
			r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2);
			if (r)
				return r;
			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset;
			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset;
			r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2);
			if (r)
				return r;
			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset;
			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset;
			r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2);
			if (r)
				return r;
		} else {
			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset;
			r = gfx_v11_0_config_me_cache(adev, addr);
			if (r)
				return r;
			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset;
			r = gfx_v11_0_config_pfp_cache(adev, addr);
			if (r)
				return r;
			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset;
			r = gfx_v11_0_config_mec_cache(adev, addr);
			if (r)
				return r;
		}
	}

	return 0;
}

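/*
 * For the backdoor-autoload case handled above, the RLC has already copied
 * every firmware image into the single autoload buffer at
 * adev->gfx.rlc.rlc_autoload_gpu_addr; rlc_autoload_info[] supplies the
 * per-firmware offsets into that buffer, so the config_*_cache helpers only
 * need to point the instruction (and, for RS64, data) caches at the right
 * slices of it.
 */
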
static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;
	u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);

	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_SOC15(GC, 0, regCP_STAT) == 0)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");

	return 0;
}

static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v1_0 *pfp_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.pfp_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);

	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.pfp.pfp_fw_obj,
				      &adev->gfx.pfp.pfp_fw_gpu_addr,
				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
		gfx_v11_0_pfp_fini(adev);
		return r;
	}

	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);

	gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr);

	WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0);

	for (i = 0; i < pfp_hdr->jt_size; i++)
		WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA,
			     le32_to_cpup(fw_data + pfp_hdr->jt_offset + i));

	WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);

	return 0;
}

static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v2_0 *pfp_hdr;
	const __le32 *fw_ucode, *fw_data;
	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000;  /* wait for 50ms */

	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.pfp_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);

	/* instruction */
	fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(pfp_hdr->ucode_offset_bytes));
	fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes);
	/* data */
	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(pfp_hdr->data_offset_bytes));
	fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes);

	/* 64kb align */
	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.pfp.pfp_fw_obj,
				      &adev->gfx.pfp.pfp_fw_gpu_addr,
				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r);
		gfx_v11_0_pfp_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev, fw_data_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.pfp.pfp_fw_data_obj,
				      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
				      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r);
		gfx_v11_0_pfp_fini(adev);
		return r;
	}

	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size);
	memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size);

	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);

	if (amdgpu_emu_mode == 1)
		adev->hdp.funcs->flush_hdp(adev, NULL);

	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
		     lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
		     upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));

	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);

	/*
	 * Programming any of the CP_PFP_IC_BASE registers
	 * forces invalidation of the ME L1 I$. Wait for the
	 * invalidation to complete.
	 */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Prime the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
	/* Waiting for cache primed */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				       ICACHE_PRIMED))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to prime instruction cache\n");
		return -EINVAL;
	}

	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
			     (pfp_hdr->ucode_start_addr_hi << 30) |
			     (pfp_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
			     pfp_hdr->ucode_start_addr_hi >> 2);

		/*
		 * Program CP_ME_CNTL to reset given PIPE to take
		 * effect of CP_PFP_PRGRM_CNTR_START.
		 */
		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE0_RESET, 1);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE1_RESET, 1);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		/* Clear pfp pipe reset bit. */
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE0_RESET, 0);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE1_RESET, 0);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
			     lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
			     upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);

	/* Invalidate the data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
				       INVALIDATE_DCACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	return 0;
}

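/*
 * Unlike the v1_0 loader before it, the RS64 loader above handles the split
 * v2_0 image layout: the instruction stream and the data segment are
 * separate blobs with their own offsets and sizes, copied into separate
 * 64 KB-aligned BOs. The instruction BO feeds the CP_PFP_IC_BASE registers
 * while the data BO is programmed per pipe as RS64 DC_BASE0.
 */
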
static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v1_0 *me_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	me_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);

	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.me.me_fw_obj,
				      &adev->gfx.me.me_fw_gpu_addr,
				      (void **)&adev->gfx.me.me_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
		gfx_v11_0_me_fini(adev);
		return r;
	}

	memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);

	gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr);

	WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0);

	for (i = 0; i < me_hdr->jt_size; i++)
		WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA,
			     le32_to_cpup(fw_data + me_hdr->jt_offset + i));

	WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version);

	return 0;
}

static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v2_0 *me_hdr;
	const __le32 *fw_ucode, *fw_data;
	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000;  /* wait for 50ms */

	me_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);

	/* instruction */
	fw_ucode = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(me_hdr->ucode_offset_bytes));
	fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes);
	/* data */
	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(me_hdr->data_offset_bytes));
	fw_data_size = le32_to_cpu(me_hdr->data_size_bytes);

	/* 64kb align */
	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.me.me_fw_obj,
				      &adev->gfx.me.me_fw_gpu_addr,
				      (void **)&adev->gfx.me.me_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r);
		gfx_v11_0_me_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev, fw_data_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.me.me_fw_data_obj,
				      &adev->gfx.me.me_fw_data_gpu_addr,
				      (void **)&adev->gfx.me.me_fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
		gfx_v11_0_me_fini(adev);
		return r;
	}

	memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size);
	memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size);

	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);

	if (amdgpu_emu_mode == 1)
		adev->hdp.funcs->flush_hdp(adev, NULL);

	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
		     lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
		     upper_32_bits(adev->gfx.me.me_fw_gpu_addr));

	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);

	/*
	 * Programming any of the CP_ME_IC_BASE registers
	 * forces invalidation of the ME L1 I$. Wait for the
	 * invalidation to complete.
	 */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Prime the instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);

	/* Waiting for instruction cache primed */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				       ICACHE_PRIMED))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to prime instruction cache\n");
		return -EINVAL;
	}

	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
			     (me_hdr->ucode_start_addr_hi << 30) |
			     (me_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
			     me_hdr->ucode_start_addr_hi >> 2);

		/*
		 * Program CP_ME_CNTL to reset given PIPE to take
		 * effect of CP_ME_PRGRM_CNTR_START.
		 */
		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE0_RESET, 1);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE1_RESET, 1);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		/* Clear me pipe reset bit. */
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE0_RESET, 0);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE1_RESET, 0);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
			     lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
			     upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);

	/* Invalidate the data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
				       INVALIDATE_DCACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	return 0;
}

static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw)
		return -EINVAL;

	gfx_v11_0_cp_gfx_enable(adev, false);

	if (adev->gfx.rs64_enable)
		r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev);
	else
		r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
		return r;
	}

	if (adev->gfx.rs64_enable)
		r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev);
	else
		r = gfx_v11_0_cp_gfx_load_me_microcode(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load me fw\n", r);
		return r;
	}

	return 0;
}

static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i;
	int ctx_reg_offset;

	/* init the CP */
	WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT,
		     adev->gfx.config.max_hw_contexts - 1);
	WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1);

	if (!amdgpu_async_gfx_ring)
		gfx_v11_0_cp_gfx_enable(adev, true);

	ring = &adev->gfx.gfx_ring[0];
	r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev));
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0x80000000);

	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				amdgpu_ring_write(ring,
						  PACKET3(PACKET3_SET_CONTEXT_REG,
							  ext->reg_count));
				amdgpu_ring_write(ring, ext->reg_index -
						  PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					amdgpu_ring_write(ring, ext->extent[i]);
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	amdgpu_ring_write(ring, ctx_reg_offset);
	amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_commit(ring);

	/* submit cs packet to copy state 0 to next available state */
	if (adev->gfx.num_gfx_rings > 1) {
		/* maximum supported gfx ring is 2 */
		ring = &adev->gfx.gfx_ring[1];
		r = amdgpu_ring_alloc(ring, 2);
		if (r) {
			DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
			return r;
		}

		amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
		amdgpu_ring_write(ring, 0);

		amdgpu_ring_commit(ring);
	}

	return 0;
}

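/*
 * The submission above is the gfx11 clear-state sequence: a
 * PREAMBLE_BEGIN_CLEAR_STATE marker, a CONTEXT_CONTROL enabling context
 * load/shadowing (the two 0x80000000 words), the SECT_CONTEXT register
 * defaults taken from gfx11_cs_data, the PA_SC_TILE_STEERING_OVERRIDE value
 * the driver computed earlier, PREAMBLE_END_CLEAR_STATE, and a final
 * CLEAR_STATE. Ring 1 only replays CLEAR_STATE, copying state 0 into the
 * next available context.
 */
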
static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
					 CP_PIPE_ID pipe)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
	tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);

	WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
}

static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
					  struct amdgpu_ring *ring)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp);

	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
			    DOORBELL_RANGE_LOWER, ring->doorbell_index);
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp);

	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
		     CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
}

static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr, wptr_gpu_addr;

	/* Set the write pointer delay */
	WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32_SOC15(GC, 0, regCP_RB_VMID, 0);

	/* Init gfx ring 0 for pipe 0 */
	mutex_lock(&adev->srbm_mutex);
	gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);

	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's write pointers */
	ring->wptr = 0;
	WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
	rptr_addr = ring->rptr_gpu_addr;
	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
		     CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);

	wptr_gpu_addr = ring->wptr_gpu_addr;
	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
		     lower_32_bits(wptr_gpu_addr));
	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
		     upper_32_bits(wptr_gpu_addr));

	mdelay(1);
	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr);
	WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1);

	gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
	mutex_unlock(&adev->srbm_mutex);

	/* Init gfx ring 1 for pipe 1 */
	if (adev->gfx.num_gfx_rings > 1) {
		mutex_lock(&adev->srbm_mutex);
		gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
		/* maximum supported gfx ring is 2 */
		ring = &adev->gfx.gfx_ring[1];
		rb_bufsz = order_base_2(ring->ring_size / 8);
		tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
		WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
		/* Initialize the ring buffer's write pointers */
		ring->wptr = 0;
		WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
		/* Set the wb address whether it's enabled or not */
		rptr_addr = ring->rptr_gpu_addr;
		WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
		WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
			     CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
		wptr_gpu_addr = ring->wptr_gpu_addr;
		WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
			     lower_32_bits(wptr_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
			     upper_32_bits(wptr_gpu_addr));

		mdelay(1);
		WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);

		rb_addr = ring->gpu_addr >> 8;
		WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr);
		WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr));
		WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1);

		gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
		mutex_unlock(&adev->srbm_mutex);
	}
	/* Switch to pipe 0 */
	mutex_lock(&adev->srbm_mutex);
	gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
	mutex_unlock(&adev->srbm_mutex);

	/* start the ring */
	gfx_v11_0_cp_gfx_start(adev);

	return 0;
}

static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	u32 data;

	if (adev->gfx.rs64_enable) {
		data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
				     enable ? 0 : 1);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
				     enable ? 0 : 1);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
				     enable ? 0 : 1);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
				     enable ? 0 : 1);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
				     enable ? 0 : 1);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
				     enable ? 1 : 0);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
				     enable ? 1 : 0);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
				     enable ? 1 : 0);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
				     enable ? 1 : 0);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
				     enable ? 0 : 1);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data);
	} else {
		data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL);

		if (enable) {
			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0);
			if (!adev->enable_mes_kiq)
				data = REG_SET_FIELD(data, CP_MEC_CNTL,
						     MEC_ME2_HALT, 0);
		} else {
			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1);
			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1);
		}
		WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data);
	}

	udelay(50);
}

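/*
 * In the RS64 case above, halting and resetting are driven per pipe through
 * CP_MEC_RS64_CNTL; the legacy case only has the two MEC "me" halt bits, and
 * MEC_ME2_HALT is left asserted when the MES KIQ is in use, presumably
 * because the MES firmware manages that engine itself.
 */
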
static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	u32 *fw = NULL;
	int r;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v11_0_cp_compute_enable(adev, false);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
		gfx_v11_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr);

	/* MEC1 */
	WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0);

	for (i = 0; i < mec_hdr->jt_size; i++)
		WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA,
			     le32_to_cpup(fw_data + mec_hdr->jt_offset + i));

	WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);

	return 0;
}

static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v2_0 *mec_hdr;
	const __le32 *fw_ucode, *fw_data;
	u32 tmp, fw_ucode_size, fw_data_size;
	u32 i, usec_timeout = 50000; /* Wait for 50 ms */
	u32 *fw_ucode_ptr, *fw_data_ptr;
	int r;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v11_0_cp_compute_enable(adev, false);

	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
				le32_to_cpu(mec_hdr->ucode_offset_bytes));
	fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);

	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
				le32_to_cpu(mec_hdr->data_offset_bytes));
	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);

	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw_ucode_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
		gfx_v11_0_mec_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev, fw_data_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_data_obj,
				      &adev->gfx.mec.mec_fw_data_gpu_addr,
				      (void **)&fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
		gfx_v11_0_mec_fini(adev);
		return r;
	}

	memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
	memcpy(fw_data_ptr, fw_data, fw_data_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);

	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);

	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
		soc21_grbm_select(adev, 1, i, 0, 0);

		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO,
			     lower_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
			     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));

		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
			     mec_hdr->ucode_start_addr_lo >> 2 |
			     mec_hdr->ucode_start_addr_hi << 30);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
			     mec_hdr->ucode_start_addr_hi >> 2);

		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO,
			     lower_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
			     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Trigger an invalidation of the L1 data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
				       INVALIDATE_DCACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	return 0;
}

static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is KIQ queue */
	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
}

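/*
 * RLC_CP_SCHEDULERS' low byte selects which hardware queue acts as the KIQ:
 * the me/pipe/queue triple is packed into bits 0-7 much like a doorbell
 * index, and only that byte is replaced so the rest of the register is
 * preserved. The second write with bit 7 ORed in appears to be what
 * actually latches the selection for the RLC scheduler.
 */
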
static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev)
{
	/* set graphics engine doorbell range */
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER,
		     (adev->doorbell_index.gfx_ring0 * 2) << 2);
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
		     (adev->doorbell_index.gfx_userqueue_end * 2) << 2);

	/* set compute engine doorbell range */
	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
		     (adev->doorbell_index.kiq * 2) << 2);
	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
		     (adev->doorbell_index.userqueue_end * 2) << 2);
}

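/*
 * Doorbell index arithmetic above: the adev->doorbell_index.* values count
 * 64-bit doorbell slots, so "* 2" converts them to dword units and "<< 2"
 * to the byte offsets the DOORBELL_RANGE registers expect; the net effect
 * is index * 8 bytes.
 */
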
static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
				  struct amdgpu_mqd_prop *prop)
{
	struct v11_gfx_mqd *mqd = m;
	uint64_t hqd_gpu_addr, wb_gpu_addr;
	uint32_t tmp;
	uint32_t rb_bufsz;

	/* set up gfx hqd wptr */
	mqd->cp_gfx_hqd_wptr = 0;
	mqd->cp_gfx_hqd_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);

	/* set up mqd control */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
	mqd->cp_gfx_mqd_control = tmp;

	/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
	mqd->cp_gfx_hqd_vmid = 0;

	/* set up default queue priority level
	 * 0x0 = low priority, 0x1 = high priority */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
	mqd->cp_gfx_hqd_queue_priority = tmp;

	/* set up time quantum */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
	mqd->cp_gfx_hqd_quantum = tmp;

	/* set up gfx hqd base. this is similar to CP_RB_BASE */
	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
	mqd->cp_gfx_hqd_base = hqd_gpu_addr;
	mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */
	wb_gpu_addr = prop->rptr_gpu_addr;
	mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
	mqd->cp_gfx_hqd_rptr_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up rb_wptr_poll addr */
	wb_gpu_addr = prop->wptr_gpu_addr;
	mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up the gfx_hqd_control, similar to CP_RB0_CNTL */
	rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
	mqd->cp_gfx_hqd_cntl = tmp;

	/* set up cp_doorbell_control */
	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
	if (prop->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, prop->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	mqd->cp_rb_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);

	/* activate the queue */
	mqd->cp_gfx_hqd_active = 1;

	return 0;
}

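/*
 * Everything gfx_v11_0_gfx_mqd_init() computed above lands in memory, not
 * in registers: the MQD (memory queue descriptor) is a register snapshot
 * for one hardware queue, and it is the MES firmware (or the KIQ) that
 * later loads these values into the real CP_GFX_HQD_* registers when the
 * queue is mapped.
 */
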
3673 static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
3675 struct amdgpu_device *adev = ring->adev;
3676 struct v11_gfx_mqd *mqd = ring->mqd_ptr;
3677 int mqd_idx = ring - &adev->gfx.gfx_ring[0];
3679 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
3680 memset((void *)mqd, 0, sizeof(*mqd));
3681 mutex_lock(&adev->srbm_mutex);
3682 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3683 amdgpu_ring_init_mqd(ring);
3684 soc21_grbm_select(adev, 0, 0, 0, 0);
3685 mutex_unlock(&adev->srbm_mutex);
3686 if (adev->gfx.me.mqd_backup[mqd_idx])
3687 memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3688 } else {
3689 /* restore mqd with the backup copy */
3690 if (adev->gfx.me.mqd_backup[mqd_idx])
3691 memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
3692 /* reset the ring */
3693 ring->wptr = 0;
3694 *ring->wptr_cpu_addr = 0;
3695 amdgpu_ring_clear_ring(ring);
3696 }
3698 return 0;
3699 }
3701 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
3702 {
3703 int r, i;
3704 struct amdgpu_ring *ring;
3706 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3707 ring = &adev->gfx.gfx_ring[i];
3709 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3710 if (unlikely(r != 0))
3711 return r;
3713 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3714 if (!r) {
3715 r = gfx_v11_0_gfx_init_queue(ring);
3716 amdgpu_bo_kunmap(ring->mqd_obj);
3717 ring->mqd_ptr = NULL;
3718 }
3719 amdgpu_bo_unreserve(ring->mqd_obj);
3720 if (r)
3721 return r;
3722 }
3724 r = amdgpu_gfx_enable_kgq(adev, 0);
3725 if (r)
3726 return r;
3728 return gfx_v11_0_cp_gfx_start(adev);
3729 }
3731 static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
3732 struct amdgpu_mqd_prop *prop)
3733 {
3734 struct v11_compute_mqd *mqd = m;
3735 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3736 uint32_t tmp;
3738 mqd->header = 0xC0310800;
3739 mqd->compute_pipelinestat_enable = 0x00000001;
3740 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3741 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3742 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3743 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3744 mqd->compute_misc_reserved = 0x00000007;
3746 eop_base_addr = prop->eop_gpu_addr >> 8;
3747 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3748 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3750 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3751 tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
3752 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3753 (order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));
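/* Worked example (illustrative): GFX11_MEC_HPD_SIZE = 2048 bytes =
 * 512 dwords, so EOP_SIZE = order_base_2(512) - 1 = 8 and the hardware
 * decodes it back as 2^(8 + 1) = 512 dwords, the whole HPD buffer.
 */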
3755 mqd->cp_hqd_eop_control = tmp;
3757 /* enable doorbell? */
3758 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
3760 if (prop->use_doorbell) {
3761 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3762 DOORBELL_OFFSET, prop->doorbell_index);
3763 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3764 DOORBELL_EN, 1);
3765 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3766 DOORBELL_SOURCE, 0);
3767 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3768 DOORBELL_HIT, 0);
3769 } else {
3770 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3771 DOORBELL_EN, 0);
3772 }
3774 mqd->cp_hqd_pq_doorbell_control = tmp;
3776 /* disable the queue if it's active */
3777 mqd->cp_hqd_dequeue_request = 0;
3778 mqd->cp_hqd_pq_rptr = 0;
3779 mqd->cp_hqd_pq_wptr_lo = 0;
3780 mqd->cp_hqd_pq_wptr_hi = 0;
3782 /* set the pointer to the MQD */
3783 mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
3784 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
3786 /* set MQD vmid to 0 */
3787 tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
3788 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3789 mqd->cp_mqd_control = tmp;
3791 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3792 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
3793 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3794 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3796 /* set up the HQD, this is similar to CP_RB0_CNTL */
3797 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
3798 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3799 (order_base_2(prop->queue_size / 4) - 1));
3800 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3801 (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
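/* Illustrative arithmetic: with a 4 KiB AMDGPU_GPU_PAGE_SIZE this is
 * order_base_2(4096 / 4) - 1 = 9, i.e. one rptr report block per page.
 */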
3802 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3803 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
3804 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3805 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3806 mqd->cp_hqd_pq_control = tmp;
3808 /* set the wb address whether it's enabled or not */
3809 wb_gpu_addr = prop->rptr_gpu_addr;
3810 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3811 mqd->cp_hqd_pq_rptr_report_addr_hi =
3812 upper_32_bits(wb_gpu_addr) & 0xffff;
3814 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3815 wb_gpu_addr = prop->wptr_gpu_addr;
3816 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3817 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3819 tmp = 0;
3820 /* enable the doorbell if requested */
3821 if (prop->use_doorbell) {
3822 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
3823 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3824 DOORBELL_OFFSET, prop->doorbell_index);
3826 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3827 DOORBELL_EN, 1);
3828 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3829 DOORBELL_SOURCE, 0);
3830 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3831 DOORBELL_HIT, 0);
3832 }
3834 mqd->cp_hqd_pq_doorbell_control = tmp;
3836 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3837 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
3839 /* set the vmid for the queue */
3840 mqd->cp_hqd_vmid = 0;
3842 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
3843 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
3844 mqd->cp_hqd_persistent_state = tmp;
3846 /* set MIN_IB_AVAIL_SIZE */
3847 tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
3848 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3849 mqd->cp_hqd_ib_control = tmp;
3851 /* set static priority for a compute queue/ring */
3852 mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
3853 mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
3855 mqd->cp_hqd_active = prop->hqd_active;
3857 return 0;
3858 }
3860 static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring)
3861 {
3862 struct amdgpu_device *adev = ring->adev;
3863 struct v11_compute_mqd *mqd = ring->mqd_ptr;
3864 int j;
3866 /* deactivate the queue */
3867 if (amdgpu_sriov_vf(adev))
3868 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0);
3870 /* disable wptr polling */
3871 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3873 /* write the EOP addr */
3874 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR,
3875 mqd->cp_hqd_eop_base_addr_lo);
3876 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI,
3877 mqd->cp_hqd_eop_base_addr_hi);
3879 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3880 WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL,
3881 mqd->cp_hqd_eop_control);
3883 /* enable doorbell? */
3884 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
3885 mqd->cp_hqd_pq_doorbell_control);
3887 /* disable the queue if it's active */
3888 if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
3889 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
3890 for (j = 0; j < adev->usec_timeout; j++) {
3891 if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
3892 break;
3893 udelay(1);
3894 }
3895 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST,
3896 mqd->cp_hqd_dequeue_request);
3897 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR,
3898 mqd->cp_hqd_pq_rptr);
3899 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
3900 mqd->cp_hqd_pq_wptr_lo);
3901 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
3902 mqd->cp_hqd_pq_wptr_hi);
3903 }
3905 /* set the pointer to the MQD */
3906 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR,
3907 mqd->cp_mqd_base_addr_lo);
3908 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI,
3909 mqd->cp_mqd_base_addr_hi);
3911 /* set MQD vmid to 0 */
3912 WREG32_SOC15(GC, 0, regCP_MQD_CONTROL,
3913 mqd->cp_mqd_control);
3915 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3916 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE,
3917 mqd->cp_hqd_pq_base_lo);
3918 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI,
3919 mqd->cp_hqd_pq_base_hi);
3921 /* set up the HQD, this is similar to CP_RB0_CNTL */
3922 WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL,
3923 mqd->cp_hqd_pq_control);
3925 /* set the wb address whether it's enabled or not */
3926 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
3927 mqd->cp_hqd_pq_rptr_report_addr_lo);
3928 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3929 mqd->cp_hqd_pq_rptr_report_addr_hi);
3931 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3932 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
3933 mqd->cp_hqd_pq_wptr_poll_addr_lo);
3934 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3935 mqd->cp_hqd_pq_wptr_poll_addr_hi);
3937 /* enable the doorbell if requested */
3938 if (ring->use_doorbell) {
3939 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
3940 (adev->doorbell_index.kiq * 2) << 2);
3941 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
3942 (adev->doorbell_index.userqueue_end * 2) << 2);
3943 }
3945 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
3946 mqd->cp_hqd_pq_doorbell_control);
3948 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3949 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
3950 mqd->cp_hqd_pq_wptr_lo);
3951 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
3952 mqd->cp_hqd_pq_wptr_hi);
3954 /* set the vmid for the queue */
3955 WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid);
3957 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE,
3958 mqd->cp_hqd_persistent_state);
3960 /* activate the queue */
3961 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE,
3962 mqd->cp_hqd_active);
3964 if (ring->use_doorbell)
3965 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3967 return 0;
3968 }
3970 static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
3971 {
3972 struct amdgpu_device *adev = ring->adev;
3973 struct v11_compute_mqd *mqd = ring->mqd_ptr;
3975 gfx_v11_0_kiq_setting(ring);
3977 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
3978 /* reset MQD to a clean status */
3979 if (adev->gfx.kiq[0].mqd_backup)
3980 memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
3982 /* reset ring buffer */
3983 ring->wptr = 0;
3984 amdgpu_ring_clear_ring(ring);
3986 mutex_lock(&adev->srbm_mutex);
3987 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3988 gfx_v11_0_kiq_init_register(ring);
3989 soc21_grbm_select(adev, 0, 0, 0, 0);
3990 mutex_unlock(&adev->srbm_mutex);
3991 } else {
3992 memset((void *)mqd, 0, sizeof(*mqd));
3993 if (amdgpu_sriov_vf(adev) && adev->in_suspend)
3994 amdgpu_ring_clear_ring(ring);
3995 mutex_lock(&adev->srbm_mutex);
3996 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3997 amdgpu_ring_init_mqd(ring);
3998 gfx_v11_0_kiq_init_register(ring);
3999 soc21_grbm_select(adev, 0, 0, 0, 0);
4000 mutex_unlock(&adev->srbm_mutex);
4002 if (adev->gfx.kiq[0].mqd_backup)
4003 memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
4004 }
4006 return 0;
4007 }
4009 static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
4010 {
4011 struct amdgpu_device *adev = ring->adev;
4012 struct v11_compute_mqd *mqd = ring->mqd_ptr;
4013 int mqd_idx = ring - &adev->gfx.compute_ring[0];
4015 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4016 memset((void *)mqd, 0, sizeof(*mqd));
4017 mutex_lock(&adev->srbm_mutex);
4018 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4019 amdgpu_ring_init_mqd(ring);
4020 soc21_grbm_select(adev, 0, 0, 0, 0);
4021 mutex_unlock(&adev->srbm_mutex);
4023 if (adev->gfx.mec.mqd_backup[mqd_idx])
4024 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
4025 } else {
4026 /* restore MQD to a clean status */
4027 if (adev->gfx.mec.mqd_backup[mqd_idx])
4028 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
4029 /* reset ring buffer */
4030 ring->wptr = 0;
4031 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
4032 amdgpu_ring_clear_ring(ring);
4033 }
4035 return 0;
4036 }
4038 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
4039 {
4040 struct amdgpu_ring *ring;
4041 int r;
4043 ring = &adev->gfx.kiq[0].ring;
4045 r = amdgpu_bo_reserve(ring->mqd_obj, false);
4046 if (unlikely(r != 0))
4047 return r;
4049 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
4050 if (unlikely(r != 0)) {
4051 amdgpu_bo_unreserve(ring->mqd_obj);
4052 return r;
4053 }
4055 gfx_v11_0_kiq_init_queue(ring);
4056 amdgpu_bo_kunmap(ring->mqd_obj);
4057 ring->mqd_ptr = NULL;
4058 amdgpu_bo_unreserve(ring->mqd_obj);
4059 ring->sched.ready = true;
4060 return 0;
4061 }
4063 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
4064 {
4065 struct amdgpu_ring *ring = NULL;
4066 int r = 0, i;
4068 if (!amdgpu_async_gfx_ring)
4069 gfx_v11_0_cp_compute_enable(adev, true);
4071 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4072 ring = &adev->gfx.compute_ring[i];
4074 r = amdgpu_bo_reserve(ring->mqd_obj, false);
4075 if (unlikely(r != 0))
4076 goto done;
4077 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
4078 if (!r) {
4079 r = gfx_v11_0_kcq_init_queue(ring);
4080 amdgpu_bo_kunmap(ring->mqd_obj);
4081 ring->mqd_ptr = NULL;
4082 }
4083 amdgpu_bo_unreserve(ring->mqd_obj);
4084 if (r)
4085 goto done;
4086 }
4088 r = amdgpu_gfx_enable_kcq(adev, 0);
4089 done:
4090 return r;
4091 }
4093 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
4094 {
4095 int r, i;
4096 struct amdgpu_ring *ring;
4098 if (!(adev->flags & AMD_IS_APU))
4099 gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4101 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4102 /* legacy firmware loading */
4103 r = gfx_v11_0_cp_gfx_load_microcode(adev);
4104 if (r)
4105 return r;
4107 if (adev->gfx.rs64_enable)
4108 r = gfx_v11_0_cp_compute_load_microcode_rs64(adev);
4109 else
4110 r = gfx_v11_0_cp_compute_load_microcode(adev);
4111 if (r)
4112 return r;
4113 }
4115 gfx_v11_0_cp_set_doorbell_range(adev);
4117 if (amdgpu_async_gfx_ring) {
4118 gfx_v11_0_cp_compute_enable(adev, true);
4119 gfx_v11_0_cp_gfx_enable(adev, true);
4120 }
4122 if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
4123 r = amdgpu_mes_kiq_hw_init(adev);
4124 else
4125 r = gfx_v11_0_kiq_resume(adev);
4126 if (r)
4127 return r;
4129 r = gfx_v11_0_kcq_resume(adev);
4130 if (r)
4131 return r;
4133 if (!amdgpu_async_gfx_ring) {
4134 r = gfx_v11_0_cp_gfx_resume(adev);
4135 if (r)
4136 return r;
4137 } else {
4138 r = gfx_v11_0_cp_async_gfx_ring_resume(adev);
4139 if (r)
4140 return r;
4141 }
4143 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4144 ring = &adev->gfx.gfx_ring[i];
4145 r = amdgpu_ring_test_helper(ring);
4146 if (r)
4147 return r;
4148 }
4150 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4151 ring = &adev->gfx.compute_ring[i];
4152 r = amdgpu_ring_test_helper(ring);
4153 if (r)
4154 return r;
4155 }
4157 return 0;
4158 }
4160 static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable)
4161 {
4162 gfx_v11_0_cp_gfx_enable(adev, enable);
4163 gfx_v11_0_cp_compute_enable(adev, enable);
4164 }
4166 static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
4167 {
4168 int r;
4169 bool value;
4171 r = adev->gfxhub.funcs->gart_enable(adev);
4172 if (r)
4173 return r;
4175 adev->hdp.funcs->flush_hdp(adev, NULL);
4177 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
4178 false : true;
4180 adev->gfxhub.funcs->set_fault_enable_default(adev, value);
4181 amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
4183 return 0;
4184 }
4186 static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev)
4187 {
4188 uint32_t tmp;
4191 if (adev->gfx.rs64_enable) {
4192 tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL);
4193 tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1);
4194 WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp);
4196 tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL);
4197 tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1);
4198 WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp);
4199 }
4201 if (amdgpu_emu_mode == 1)
4202 msleep(100);
4203 }
4205 static int get_gb_addr_config(struct amdgpu_device *adev)
4206 {
4207 u32 gb_addr_config;
4209 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
4210 if (gb_addr_config == 0)
4211 return -EINVAL;
4213 adev->gfx.config.gb_addr_config_fields.num_pkrs =
4214 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
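/* GB_ADDR_CONFIG packs log2-encoded counts; 1 << field recovers the
 * real value, e.g. a NUM_PIPES field of 3 decodes to 8 pipes
 * (illustrative, the actual value is chip-specific).
 */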
4216 adev->gfx.config.gb_addr_config = gb_addr_config;
4218 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
4219 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4220 GB_ADDR_CONFIG, NUM_PIPES);
4222 adev->gfx.config.max_tile_pipes =
4223 adev->gfx.config.gb_addr_config_fields.num_pipes;
4225 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
4226 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4227 GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
4228 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
4229 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4230 GB_ADDR_CONFIG, NUM_RB_PER_SE);
4231 adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
4232 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4233 GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
4234 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
4235 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4236 GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
4238 return 0;
4239 }
4241 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev)
4242 {
4243 uint32_t data;
4245 data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG);
4246 data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
4247 WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data);
4249 data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG);
4250 data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
4251 WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
4252 }
4254 static int gfx_v11_0_hw_init(void *handle)
4255 {
4256 int r;
4257 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4259 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
4260 if (adev->gfx.imu.funcs) {
4261 /* RLC autoload sequence 1: Program rlc ram */
4262 if (adev->gfx.imu.funcs->program_rlc_ram)
4263 adev->gfx.imu.funcs->program_rlc_ram(adev);
4264 }
4265 /* rlc autoload firmware */
4266 r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
4267 if (r)
4268 return r;
4269 } else {
4270 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4271 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
4272 if (adev->gfx.imu.funcs->load_microcode)
4273 adev->gfx.imu.funcs->load_microcode(adev);
4274 if (adev->gfx.imu.funcs->setup_imu)
4275 adev->gfx.imu.funcs->setup_imu(adev);
4276 if (adev->gfx.imu.funcs->start_imu)
4277 adev->gfx.imu.funcs->start_imu(adev);
4278 }
4280 /* disable gpa mode in backdoor loading */
4281 gfx_v11_0_disable_gpa_mode(adev);
4282 }
4283 }
4285 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
4286 (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
4287 r = gfx_v11_0_wait_for_rlc_autoload_complete(adev);
4288 if (r) {
4289 dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r);
4290 return r;
4291 }
4292 }
4294 adev->gfx.is_poweron = true;
4296 if (get_gb_addr_config(adev))
4297 DRM_WARN("Invalid gb_addr_config !\n");
4299 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
4300 adev->gfx.rs64_enable)
4301 gfx_v11_0_config_gfx_rs64(adev);
4303 r = gfx_v11_0_gfxhub_enable(adev);
4304 if (r)
4305 return r;
4307 if (!amdgpu_emu_mode)
4308 gfx_v11_0_init_golden_registers(adev);
4310 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
4311 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
4312 /*
4313 * For gfx 11, RLC firmware loading relies on the SMU firmware being
4314 * loaded first, so in the direct loading case the SMC ucode has to be
4315 * loaded here before the RLC.
4316 */
4317 if (!(adev->flags & AMD_IS_APU)) {
4318 r = amdgpu_pm_load_smu_firmware(adev, NULL);
4319 if (r)
4320 return r;
4321 }
4322 }
4324 gfx_v11_0_constants_init(adev);
4326 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
4327 gfx_v11_0_select_cp_fw_arch(adev);
4329 if (adev->nbio.funcs->gc_doorbell_init)
4330 adev->nbio.funcs->gc_doorbell_init(adev);
4332 r = gfx_v11_0_rlc_resume(adev);
4333 if (r)
4334 return r;
4336 /*
4337 * golden register init and rlc resume may override some registers,
4338 * so reconfigure them here
4339 */
4340 gfx_v11_0_tcp_harvest(adev);
4342 r = gfx_v11_0_cp_resume(adev);
4343 if (r)
4344 return r;
4346 return 0;
4347 }
4349 static int gfx_v11_0_hw_fini(void *handle)
4350 {
4351 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4353 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4354 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4356 if (!adev->no_hw_access) {
4357 if (amdgpu_async_gfx_ring) {
4358 if (amdgpu_gfx_disable_kgq(adev, 0))
4359 DRM_ERROR("KGQ disable failed\n");
4360 }
4362 if (amdgpu_gfx_disable_kcq(adev, 0))
4363 DRM_ERROR("KCQ disable failed\n");
4365 amdgpu_mes_kiq_hw_fini(adev);
4366 }
4368 if (amdgpu_sriov_vf(adev))
4369 /* Remove the steps disabling CPG and clearing KIQ position,
4370 * so that CP could perform IDLE-SAVE during switch. Those
4371 * steps are necessary to avoid a DMAR error in gfx9 but it is
4372 * not reproduced on gfx11.
4373 */
4374 return 0;
4376 gfx_v11_0_cp_enable(adev, false);
4377 gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4379 adev->gfxhub.funcs->gart_disable(adev);
4381 adev->gfx.is_poweron = false;
4383 return 0;
4384 }
4386 static int gfx_v11_0_suspend(void *handle)
4387 {
4388 return gfx_v11_0_hw_fini(handle);
4389 }
4391 static int gfx_v11_0_resume(void *handle)
4392 {
4393 return gfx_v11_0_hw_init(handle);
4394 }
4396 static bool gfx_v11_0_is_idle(void *handle)
4397 {
4398 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4400 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
4401 GRBM_STATUS, GUI_ACTIVE))
4402 return false;
4403 else
4404 return true;
4405 }
4407 static int gfx_v11_0_wait_for_idle(void *handle)
4408 {
4409 unsigned i;
4410 u32 tmp;
4411 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4413 for (i = 0; i < adev->usec_timeout; i++) {
4414 /* read GRBM_STATUS */
4415 tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) &
4416 GRBM_STATUS__GUI_ACTIVE_MASK;
4418 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
4419 return 0;
4420 udelay(1);
4421 }
4422 return -ETIMEDOUT;
4423 }
4425 static int gfx_v11_0_soft_reset(void *handle)
4426 {
4427 u32 grbm_soft_reset = 0;
4428 u32 tmp;
4429 int i, j, k;
4430 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4432 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4433 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
4434 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
4435 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0);
4436 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
4437 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
4439 gfx_v11_0_set_safe_mode(adev, 0);
4441 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
4442 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
4443 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
4444 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
4445 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
4446 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
4447 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
4448 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
4450 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
4451 WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
4452 }
4453 }
4454 }
4455 for (i = 0; i < adev->gfx.me.num_me; ++i) {
4456 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
4457 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
4458 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
4459 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
4460 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
4461 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
4462 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
4464 WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
4465 }
4466 }
4467 }
4469 WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);
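/* 0xfffffffe reads as one reset bit per VMID with bit 0 left clear,
 * i.e. reset the queues of every VMID except VMID 0 (interpretation
 * of the mask, not taken from the register spec).
 */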
4471 /* Read the CP_VMID_RESET register three times to give
4472 * GFX_HQD_ACTIVE sufficient time to reach 0. */
4473 RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4474 RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4475 RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4477 for (i = 0; i < adev->usec_timeout; i++) {
4478 if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
4479 !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
4480 break;
4481 udelay(1);
4482 }
4483 if (i >= adev->usec_timeout) {
4484 printk("Failed to wait all pipes clean\n");
4485 return -EINVAL;
4486 }
4488 /********** trigger soft reset ***********/
4489 grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
4490 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4491 SOFT_RESET_CP, 1);
4492 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4493 SOFT_RESET_GFX, 1);
4494 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4495 SOFT_RESET_CPF, 1);
4496 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4497 SOFT_RESET_CPC, 1);
4498 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4499 SOFT_RESET_CPG, 1);
4500 WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
4501 /********** exit soft reset ***********/
4502 grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
4503 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4504 SOFT_RESET_CP, 0);
4505 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4506 SOFT_RESET_GFX, 0);
4507 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4508 SOFT_RESET_CPF, 0);
4509 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4510 SOFT_RESET_CPC, 0);
4511 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4512 SOFT_RESET_CPG, 0);
4513 WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
4515 tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL);
4516 tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1);
4517 WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp);
4519 WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0);
4520 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0);
4522 for (i = 0; i < adev->usec_timeout; i++) {
4523 if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET))
4524 break;
4525 udelay(1);
4526 }
4527 if (i >= adev->usec_timeout) {
4528 printk("Failed to wait CP_VMID_RESET to 0\n");
4529 return -EINVAL;
4530 }
4532 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4533 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
4534 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
4535 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
4536 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
4537 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
4539 gfx_v11_0_unset_safe_mode(adev, 0);
4541 return gfx_v11_0_cp_resume(adev);
4542 }
4544 static bool gfx_v11_0_check_soft_reset(void *handle)
4545 {
4546 int i, r;
4547 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4548 struct amdgpu_ring *ring;
4549 long tmo = msecs_to_jiffies(1000);
4551 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4552 ring = &adev->gfx.gfx_ring[i];
4553 r = amdgpu_ring_test_ib(ring, tmo);
4554 if (r)
4555 return true;
4556 }
4558 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4559 ring = &adev->gfx.compute_ring[i];
4560 r = amdgpu_ring_test_ib(ring, tmo);
4561 if (r)
4562 return true;
4563 }
4565 return false;
4566 }
4568 static int gfx_v11_0_post_soft_reset(void *handle)
4569 {
4570 /*
4571 * GFX soft reset also resets MES, so MES has to be resumed after a GFX soft reset
4572 */
4573 return amdgpu_mes_resume((struct amdgpu_device *)handle);
4574 }
4576 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4577 {
4578 uint64_t clock;
4579 uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
4581 if (amdgpu_sriov_vf(adev)) {
4582 amdgpu_gfx_off_ctrl(adev, false);
4583 mutex_lock(&adev->gfx.gpu_clock_mutex);
4584 clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
4585 clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
4586 clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
4587 if (clock_counter_hi_pre != clock_counter_hi_after)
4588 clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
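/* The hi/lo/hi read order guards against the low word rolling over
 * between the two halves: if the high word changed, the low word is
 * re-read so the combined 64-bit value stays consistent.
 */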
4589 mutex_unlock(&adev->gfx.gpu_clock_mutex);
4590 amdgpu_gfx_off_ctrl(adev, true);
4591 } else {
4592 preempt_disable();
4593 clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
4594 clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
4595 clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
4596 if (clock_counter_hi_pre != clock_counter_hi_after)
4597 clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
4598 preempt_enable();
4599 }
4600 clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
4602 return clock;
4603 }
4605 static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4606 uint32_t vmid,
4607 uint32_t gds_base, uint32_t gds_size,
4608 uint32_t gws_base, uint32_t gws_size,
4609 uint32_t oa_base, uint32_t oa_size)
4610 {
4611 struct amdgpu_device *adev = ring->adev;
4613 /* GDS Base */
4614 gfx_v11_0_write_data_to_reg(ring, 0, false,
4615 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid,
4616 gds_base);
4618 /* GDS Size */
4619 gfx_v11_0_write_data_to_reg(ring, 0, false,
4620 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid,
4621 gds_size);
4623 /* GWS */
4624 gfx_v11_0_write_data_to_reg(ring, 0, false,
4625 SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid,
4626 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
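/* The OA write below builds a contiguous mask of oa_size bits
 * starting at oa_base, e.g. oa_base = 4, oa_size = 4 gives
 * (1 << 8) - (1 << 4) = 0xf0 (illustrative values).
 */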
4628 /* OA */
4629 gfx_v11_0_write_data_to_reg(ring, 0, false,
4630 SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid,
4631 (1 << (oa_size + oa_base)) - (1 << oa_base));
4632 }
4634 static int gfx_v11_0_early_init(void *handle)
4635 {
4636 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4638 adev->gfx.funcs = &gfx_v11_0_gfx_funcs;
4640 adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
4641 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4642 AMDGPU_MAX_COMPUTE_RINGS);
4644 gfx_v11_0_set_kiq_pm4_funcs(adev);
4645 gfx_v11_0_set_ring_funcs(adev);
4646 gfx_v11_0_set_irq_funcs(adev);
4647 gfx_v11_0_set_gds_init(adev);
4648 gfx_v11_0_set_rlc_funcs(adev);
4649 gfx_v11_0_set_mqd_funcs(adev);
4650 gfx_v11_0_set_imu_funcs(adev);
4652 gfx_v11_0_init_rlcg_reg_access_ctrl(adev);
4654 return gfx_v11_0_init_microcode(adev);
4655 }
4657 static int gfx_v11_0_late_init(void *handle)
4658 {
4659 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4660 int r;
4662 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4663 if (r)
4664 return r;
4666 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4667 if (r)
4668 return r;
4670 return 0;
4671 }
4673 static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev)
4674 {
4675 uint32_t rlc_cntl;
4677 /* if RLC is not enabled, do nothing */
4678 rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL);
4679 return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
4680 }
4682 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
4683 {
4684 uint32_t data;
4685 unsigned i;
4687 data = RLC_SAFE_MODE__CMD_MASK;
4688 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4690 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);
4692 /* wait for RLC_SAFE_MODE */
4693 for (i = 0; i < adev->usec_timeout; i++) {
4694 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE),
4695 RLC_SAFE_MODE, CMD))
4696 break;
4697 udelay(1);
4698 }
4699 }
4701 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
4702 {
4703 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
4704 }
4706 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
4707 bool enable)
4708 {
4709 uint32_t def, data;
4711 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
4712 return;
4714 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4716 if (enable)
4717 data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
4718 else
4719 data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
4721 if (def != data)
4722 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4723 }
4725 static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev,
4726 bool enable)
4727 {
4728 uint32_t def, data;
4730 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
4731 return;
4733 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4735 if (enable)
4736 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
4737 else
4738 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
4740 if (def != data)
4741 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4742 }
4744 static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev,
4745 bool enable)
4746 {
4747 uint32_t def, data;
4749 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
4750 return;
4752 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4754 if (enable)
4755 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
4756 else
4757 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
4759 if (def != data)
4760 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4761 }
4763 static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4764 bool enable)
4765 {
4766 uint32_t data, def;
4768 if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
4769 return;
4771 /* It is disabled by HW by default */
4772 if (enable) {
4773 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
4774 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4775 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4777 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4778 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4779 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
4781 if (def != data)
4782 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4783 }
4784 } else {
4785 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
4786 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4788 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4789 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4790 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
4792 if (def != data)
4793 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4794 }
4795 }
4796 }
4798 static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4799 bool enable)
4800 {
4801 uint32_t def, data;
4803 if (!(adev->cg_flags &
4804 (AMD_CG_SUPPORT_GFX_CGCG |
4805 AMD_CG_SUPPORT_GFX_CGLS |
4806 AMD_CG_SUPPORT_GFX_3D_CGCG |
4807 AMD_CG_SUPPORT_GFX_3D_CGLS)))
4808 return;
4810 if (enable) {
4811 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4813 /* unset CGCG override */
4814 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
4815 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4816 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4817 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4818 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
4819 adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4820 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4822 /* update CGCG override bits */
4823 if (def != data)
4824 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4826 /* enable cgcg FSM(0x0000363F) */
4827 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
4829 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
4830 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
4831 data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4832 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4833 }
4835 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
4836 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
4837 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4838 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4839 }
4841 if (def != data)
4842 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
4844 /* Program RLC_CGCG_CGLS_CTRL_3D */
4845 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
4847 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
4848 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK;
4849 data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4850 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4851 }
4853 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
4854 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK;
4855 data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4856 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4857 }
4859 if (def != data)
4860 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
4862 /* set IDLE_POLL_COUNT(0x00900100) */
4863 def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL);
4865 data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK);
4866 data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4867 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
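/* The two fields compose to the 0x00900100 noted above:
 * (0x0090 << 16) | (0x0100 << 0).
 */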
4869 if (def != data)
4870 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data);
4872 data = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4873 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
4874 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
4875 data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
4876 data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
4877 WREG32_SOC15(GC, 0, regCP_INT_CNTL, data);
4879 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
4880 data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
4881 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
4883 /* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
4884 if (adev->sdma.num_instances > 1) {
4885 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
4886 data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
4887 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
4888 }
4889 } else {
4890 /* Program RLC_CGCG_CGLS_CTRL */
4891 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
4893 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
4894 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4896 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4897 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4899 if (def != data)
4900 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
4902 /* Program RLC_CGCG_CGLS_CTRL_3D */
4903 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
4905 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
4906 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4907 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4908 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4910 if (def != data)
4911 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
4913 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
4914 data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
4915 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
4917 /* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
4918 if (adev->sdma.num_instances > 1) {
4919 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
4920 data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
4921 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
4922 }
4923 }
4924 }
4926 static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4927 bool enable)
4928 {
4929 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
4931 gfx_v11_0_update_coarse_grain_clock_gating(adev, enable);
4933 gfx_v11_0_update_medium_grain_clock_gating(adev, enable);
4935 gfx_v11_0_update_repeater_fgcg(adev, enable);
4937 gfx_v11_0_update_sram_fgcg(adev, enable);
4939 gfx_v11_0_update_perf_clk(adev, enable);
4941 if (adev->cg_flags &
4942 (AMD_CG_SUPPORT_GFX_MGCG |
4943 AMD_CG_SUPPORT_GFX_CGLS |
4944 AMD_CG_SUPPORT_GFX_CGCG |
4945 AMD_CG_SUPPORT_GFX_3D_CGCG |
4946 AMD_CG_SUPPORT_GFX_3D_CGLS))
4947 gfx_v11_0_enable_gui_idle_interrupt(adev, enable);
4949 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
4951 return 0;
4952 }
4954 static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
4955 {
4956 u32 reg, data;
4958 amdgpu_gfx_off_ctrl(adev, false);
4960 reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
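/* RLC_SPM_MC_CNTL tags streaming-perf-monitor memory traffic with a
 * VMID; only the VMID field is rewritten below, the rest of the
 * register value is preserved.
 */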
4961 if (amdgpu_sriov_is_pp_one_vf(adev))
4962 data = RREG32_NO_KIQ(reg);
4963 else
4964 data = RREG32(reg);
4966 data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
4967 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
4969 if (amdgpu_sriov_is_pp_one_vf(adev))
4970 WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
4971 else
4972 WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
4974 amdgpu_gfx_off_ctrl(adev, true);
4975 }
4977 static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
4978 .is_rlc_enabled = gfx_v11_0_is_rlc_enabled,
4979 .set_safe_mode = gfx_v11_0_set_safe_mode,
4980 .unset_safe_mode = gfx_v11_0_unset_safe_mode,
4981 .init = gfx_v11_0_rlc_init,
4982 .get_csb_size = gfx_v11_0_get_csb_size,
4983 .get_csb_buffer = gfx_v11_0_get_csb_buffer,
4984 .resume = gfx_v11_0_rlc_resume,
4985 .stop = gfx_v11_0_rlc_stop,
4986 .reset = gfx_v11_0_rlc_reset,
4987 .start = gfx_v11_0_rlc_start,
4988 .update_spm_vmid = gfx_v11_0_update_spm_vmid,
4989 };
4991 static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
4992 {
4993 u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
4995 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
4996 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
4997 else
4998 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5000 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);
5002 /* Program RLC_PG_DELAY3 for CGPG hysteresis */
5003 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
5004 switch (adev->ip_versions[GC_HWIP][0]) {
5005 case IP_VERSION(11, 0, 1):
5006 case IP_VERSION(11, 0, 4):
5007 WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
5008 break;
5009 default:
5010 break;
5011 }
5012 }
5013 }
5015 static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
5016 {
5017 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5019 gfx_v11_cntl_power_gating(adev, enable);
5021 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5022 }
5024 static int gfx_v11_0_set_powergating_state(void *handle,
5025 enum amd_powergating_state state)
5026 {
5027 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5028 bool enable = (state == AMD_PG_STATE_GATE);
5030 if (amdgpu_sriov_vf(adev))
5031 return 0;
5033 switch (adev->ip_versions[GC_HWIP][0]) {
5034 case IP_VERSION(11, 0, 0):
5035 case IP_VERSION(11, 0, 2):
5036 case IP_VERSION(11, 0, 3):
5037 amdgpu_gfx_off_ctrl(adev, enable);
5038 break;
5039 case IP_VERSION(11, 0, 1):
5040 case IP_VERSION(11, 0, 4):
5041 if (!enable)
5042 amdgpu_gfx_off_ctrl(adev, false);
5044 gfx_v11_cntl_pg(adev, enable);
5046 if (enable)
5047 amdgpu_gfx_off_ctrl(adev, true);
5049 break;
5050 default:
5051 break;
5052 }
5054 return 0;
5055 }
5057 static int gfx_v11_0_set_clockgating_state(void *handle,
5058 enum amd_clockgating_state state)
5059 {
5060 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5062 if (amdgpu_sriov_vf(adev))
5063 return 0;
5065 switch (adev->ip_versions[GC_HWIP][0]) {
5066 case IP_VERSION(11, 0, 0):
5067 case IP_VERSION(11, 0, 1):
5068 case IP_VERSION(11, 0, 2):
5069 case IP_VERSION(11, 0, 3):
5070 case IP_VERSION(11, 0, 4):
5071 gfx_v11_0_update_gfx_clock_gating(adev,
5072 state == AMD_CG_STATE_GATE);
5073 break;
5074 default:
5075 break;
5076 }
5078 return 0;
5079 }
5081 static void gfx_v11_0_get_clockgating_state(void *handle, u64 *flags)
5082 {
5083 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5084 int data;
5086 /* AMD_CG_SUPPORT_GFX_MGCG */
5087 data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5088 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5089 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
5091 /* AMD_CG_SUPPORT_REPEATER_FGCG */
5092 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
5093 *flags |= AMD_CG_SUPPORT_REPEATER_FGCG;
5095 /* AMD_CG_SUPPORT_GFX_FGCG */
5096 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
5097 *flags |= AMD_CG_SUPPORT_GFX_FGCG;
5099 /* AMD_CG_SUPPORT_GFX_PERF_CLK */
5100 if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
5101 *flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;
5103 /* AMD_CG_SUPPORT_GFX_CGCG */
5104 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5105 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5106 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
5108 /* AMD_CG_SUPPORT_GFX_CGLS */
5109 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5110 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
5112 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
5113 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5114 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5115 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5117 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
5118 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5119 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5120 }
5122 static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5123 {
5124 /* gfx11 is 32-bit rptr */
5125 return *(uint32_t *)ring->rptr_cpu_addr;
5126 }
5128 static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5129 {
5130 struct amdgpu_device *adev = ring->adev;
5131 u64 wptr;
5133 /* XXX check if swapping is necessary on BE */
5134 if (ring->use_doorbell) {
5135 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5136 } else {
5137 wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR);
5138 wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32;
5139 }
5141 return wptr;
5142 }
5144 static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5145 {
5146 struct amdgpu_device *adev = ring->adev;
5147 uint32_t *wptr_saved;
5148 uint32_t *is_queue_unmap;
5149 uint64_t aggregated_db_index;
5150 uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size;
5151 uint64_t wptr_tmp;
5153 if (ring->is_mes_queue) {
5154 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
5155 is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
5156 sizeof(uint32_t));
5157 aggregated_db_index =
5158 amdgpu_mes_get_aggregated_doorbell_index(adev,
5159 ring->hw_prio);
5161 wptr_tmp = ring->wptr & ring->buf_mask;
5162 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
5163 *wptr_saved = wptr_tmp;
5164 /* assume the doorbell is always used by an MES-mapped queue */
5165 if (*is_queue_unmap) {
5166 WDOORBELL64(aggregated_db_index, wptr_tmp);
5167 WDOORBELL64(ring->doorbell_index, wptr_tmp);
5168 } else {
5169 WDOORBELL64(ring->doorbell_index, wptr_tmp);
5171 if (*is_queue_unmap)
5172 WDOORBELL64(aggregated_db_index, wptr_tmp);
5173 }
5174 } else {
5175 if (ring->use_doorbell) {
5176 /* XXX check if swapping is necessary on BE */
5177 atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
5178 ring->wptr);
5179 WDOORBELL64(ring->doorbell_index, ring->wptr);
5180 } else {
5181 WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
5182 lower_32_bits(ring->wptr));
5183 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
5184 upper_32_bits(ring->wptr));
5185 }
5186 }
5187 }
5189 static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5190 {
5191 /* gfx11 hardware is 32-bit rptr */
5192 return *(uint32_t *)ring->rptr_cpu_addr;
5193 }
5195 static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5196 {
5197 u64 wptr;
5199 /* XXX check if swapping is necessary on BE */
5200 if (ring->use_doorbell)
5201 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5202 else
5203 BUG(); /* only DOORBELL method supported on gfx11 now */
5205 return wptr;
5206 }
5207 static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5208 {
5209 struct amdgpu_device *adev = ring->adev;
5210 uint32_t *wptr_saved;
5211 uint32_t *is_queue_unmap;
5212 uint64_t aggregated_db_index;
5213 uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size;
5214 uint64_t wptr_tmp;
5216 if (ring->is_mes_queue) {
5217 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
5218 is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
5219 sizeof(uint32_t));
5220 aggregated_db_index =
5221 amdgpu_mes_get_aggregated_doorbell_index(adev,
5222 ring->hw_prio);
5224 wptr_tmp = ring->wptr & ring->buf_mask;
5225 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
5226 *wptr_saved = wptr_tmp;
5227 /* assume the doorbell is always used by an MES-mapped queue */
5228 if (*is_queue_unmap) {
5229 WDOORBELL64(aggregated_db_index, wptr_tmp);
5230 WDOORBELL64(ring->doorbell_index, wptr_tmp);
5231 } else {
5232 WDOORBELL64(ring->doorbell_index, wptr_tmp);
5234 if (*is_queue_unmap)
5235 WDOORBELL64(aggregated_db_index, wptr_tmp);
5236 }
5237 } else {
5238 /* XXX check if swapping is necessary on BE */
5239 if (ring->use_doorbell) {
5240 atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
5241 ring->wptr);
5242 WDOORBELL64(ring->doorbell_index, ring->wptr);
5243 } else {
5244 BUG(); /* only DOORBELL method supported on gfx11 now */
5245 }
5246 }
5247 }
5249 static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5250 {
5251 struct amdgpu_device *adev = ring->adev;
5252 u32 ref_and_mask, reg_mem_engine;
5253 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
5255 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
5256 switch (ring->me) {
5257 case 1:
5258 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
5259 break;
5260 case 2:
5261 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
5262 break;
5263 default:
5264 return;
5265 }
5266 reg_mem_engine = 0;
5267 } else {
5268 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
5269 reg_mem_engine = 1; /* pfp */
5270 }
5272 gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5273 adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5274 adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5275 ref_and_mask, ref_and_mask, 0x20);
5276 }
5278 static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5279 struct amdgpu_job *job,
5280 struct amdgpu_ib *ib,
5281 uint32_t flags)
5282 {
5283 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5284 u32 header, control = 0;
5286 BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
5288 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5290 control |= ib->length_dw | (vmid << 24);
5292 if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5293 control |= INDIRECT_BUFFER_PRE_ENB(1);
5295 if (flags & AMDGPU_IB_PREEMPTED)
5296 control |= INDIRECT_BUFFER_PRE_RESUME(1);
5298 if (vmid)
5299 gfx_v11_0_ring_emit_de_meta(ring,
5300 (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
5301 }
5303 if (ring->is_mes_queue)
5304 /* inherit vmid from mqd */
5305 control |= 0x400000;
5307 amdgpu_ring_write(ring, header);
5308 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5309 amdgpu_ring_write(ring,
5310 #ifdef __BIG_ENDIAN
5311 (2 << 0) |
5312 #endif
5313 lower_32_bits(ib->gpu_addr));
5314 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5315 amdgpu_ring_write(ring, control);
5316 }
5318 static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5319 struct amdgpu_job *job,
5320 struct amdgpu_ib *ib,
5321 uint32_t flags)
5322 {
5323 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5324 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5326 if (ring->is_mes_queue)
5327 /* inherit vmid from mqd */
5328 control |= 0x40000000;
5330 /* Currently, there is a high possibility to get wave ID mismatch
5331 * between ME and GDS, leading to a hw deadlock, because ME generates
5332 * different wave IDs than the GDS expects. This situation happens
5333 * randomly when at least 5 compute pipes use GDS ordered append.
5334 * The wave IDs generated by ME are also wrong after suspend/resume.
5335 * Those are probably bugs somewhere else in the kernel driver.
5337 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5338 * GDS to 0 for this ring (me/pipe).
5339 */
5340 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5341 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5342 amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
5343 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5344 }
5346 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5347 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5348 amdgpu_ring_write(ring,
5349 #ifdef __BIG_ENDIAN
5350 (2 << 0) |
5351 #endif
5352 lower_32_bits(ib->gpu_addr));
5353 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5354 amdgpu_ring_write(ring, control);
5355 }
5357 static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5358 u64 seq, unsigned flags)
5359 {
5360 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5361 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5363 /* RELEASE_MEM - flush caches, send int */
5364 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5365 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
5366 PACKET3_RELEASE_MEM_GCR_GL2_WB |
5367 PACKET3_RELEASE_MEM_GCR_GL2_INV |
5368 PACKET3_RELEASE_MEM_GCR_GL2_US |
5369 PACKET3_RELEASE_MEM_GCR_GL1_INV |
5370 PACKET3_RELEASE_MEM_GCR_GLV_INV |
5371 PACKET3_RELEASE_MEM_GCR_GLM_INV |
5372 PACKET3_RELEASE_MEM_GCR_GLM_WB |
5373 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
5374 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5375 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
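/* The GCR bits above bundle a GL2 write-back/invalidate with
 * GL1/GLV/GLM invalidates into the timestamp event, so the fence only
 * signals after prior writes are visible (summary, not exhaustive).
 */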
5376 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
5377 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
5379 /*
5380 * the address should be Qword aligned for a 64-bit write, Dword
5381 * aligned when only the low 32 bits are sent (data high discarded)
5382 */
5383 if (write64bit)
5384 BUG_ON(addr & 0x7);
5385 else
5386 BUG_ON(addr & 0x3);
5387 amdgpu_ring_write(ring, lower_32_bits(addr));
5388 amdgpu_ring_write(ring, upper_32_bits(addr));
5389 amdgpu_ring_write(ring, lower_32_bits(seq));
5390 amdgpu_ring_write(ring, upper_32_bits(seq));
5391 amdgpu_ring_write(ring, ring->is_mes_queue ?
5392 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
5393 }
5395 static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5396 {
5397 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5398 uint32_t seq = ring->fence_drv.sync_seq;
5399 uint64_t addr = ring->fence_drv.gpu_addr;
5401 gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
5402 upper_32_bits(addr), seq, 0xffffffff, 4);
5403 }
5405 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
5406 uint16_t pasid, uint32_t flush_type,
5407 bool all_hub, uint8_t dst_sel)
5408 {
5409 amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
5410 amdgpu_ring_write(ring,
5411 PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
5412 PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
5413 PACKET3_INVALIDATE_TLBS_PASID(pasid) |
5414 PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
5415 }
5417 static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5418 unsigned vmid, uint64_t pd_addr)
5419 {
5420 if (ring->is_mes_queue)
5421 gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
5422 else
5423 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5425 /* compute doesn't have PFP */
5426 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5427 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5428 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5429 amdgpu_ring_write(ring, 0x0);
5430 }
5431 }
5433 static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5434 u64 seq, unsigned int flags)
5435 {
5436 struct amdgpu_device *adev = ring->adev;
5438 /* we only allocate 32bit for each seq wb address */
5439 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5441 /* write fence seq to the "addr" */
5442 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5443 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5444 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5445 amdgpu_ring_write(ring, lower_32_bits(addr));
5446 amdgpu_ring_write(ring, upper_32_bits(addr));
5447 amdgpu_ring_write(ring, lower_32_bits(seq));
5449 if (flags & AMDGPU_FENCE_FLAG_INT) {
5450 /* set register to trigger INT */
5451 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5452 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5453 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5454 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS));
5455 amdgpu_ring_write(ring, 0);
5456 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5457 }
5458 }
5460 static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
5461 uint32_t flags)
5462 {
5463 uint32_t dw2 = 0;
5465 dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
5466 if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5467 /* set load_global_config & load_global_uconfig */
5468 dw2 |= 0x8001;
5469 /* set load_cs_sh_regs */
5470 dw2 |= 0x01000000;
5471 /* set load_per_context_state & load_gfx_sh_regs for GFX */
5472 dw2 |= 0x10002;
5473 }
5475 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5476 amdgpu_ring_write(ring, dw2);
5477 amdgpu_ring_write(ring, 0);
5478 }
5480 static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring,
5481 u64 shadow_va, u64 csa_va,
5482 u64 gds_va, bool init_shadow,
5483 int vmid)
5484 {
5485 struct amdgpu_device *adev = ring->adev;
5487 if (!adev->gfx.cp_gfx_shadow)
5488 return;
5490 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_Q_PREEMPTION_MODE, 7));
5491 amdgpu_ring_write(ring, lower_32_bits(shadow_va));
5492 amdgpu_ring_write(ring, upper_32_bits(shadow_va));
5493 amdgpu_ring_write(ring, lower_32_bits(gds_va));
5494 amdgpu_ring_write(ring, upper_32_bits(gds_va));
5495 amdgpu_ring_write(ring, lower_32_bits(csa_va));
5496 amdgpu_ring_write(ring, upper_32_bits(csa_va));
5497 amdgpu_ring_write(ring, shadow_va ?
5498 PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(vmid) : 0);
5499 amdgpu_ring_write(ring, init_shadow ?
5500 PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM : 0);
5501 }
5503 static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
5504 {
5505 unsigned ret;
5507 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5508 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
5509 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
5510 amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
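/* COND_EXEC: the CP reads the qword at cond_exe_gpu_addr and, when it
 * is zero, skips the following N dwords; N is the dummy value written
 * below and patched in gfx_v11_0_ring_emit_patch_cond_exec().
 */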
5511 ret = ring->wptr & ring->buf_mask;
5512 amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
5514 return ret;
5515 }
5517 static void gfx_v11_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
5518 {
5519 unsigned cur;
5520 BUG_ON(offset > ring->buf_mask);
5521 BUG_ON(ring->ring[offset] != 0x55aa55aa);
5523 cur = (ring->wptr - 1) & ring->buf_mask;
5524 if (likely(cur > offset))
5525 ring->ring[offset] = cur - offset;
5526 else
5527 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
5528 }
5530 static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
5531 {
5532 int i, r = 0;
5533 struct amdgpu_device *adev = ring->adev;
5534 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
5535 struct amdgpu_ring *kiq_ring = &kiq->ring;
5536 unsigned long flags;
5538 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
5539 return -EINVAL;
5541 spin_lock_irqsave(&kiq->ring_lock, flags);
5543 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
5544 spin_unlock_irqrestore(&kiq->ring_lock, flags);
5545 return -ENOMEM;
5546 }
5548 /* assert preemption condition */
5549 amdgpu_ring_set_preempt_cond_exec(ring, false);
5551 /* assert IB preemption, emit the trailing fence */
5552 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
5553 ring->trail_fence_gpu_addr,
5554 ++ring->trail_seq);
5555 amdgpu_ring_commit(kiq_ring);
5557 spin_unlock_irqrestore(&kiq->ring_lock, flags);
5559 /* poll the trailing fence */
5560 for (i = 0; i < adev->usec_timeout; i++) {
5561 if (ring->trail_seq ==
5562 le32_to_cpu(*(ring->trail_fence_cpu_addr)))
5563 break;
5564 udelay(10);
5565 }
5567 if (i >= adev->usec_timeout) {
5568 r = -EINVAL;
5569 DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
5570 }
5572 /* deassert preemption condition */
5573 amdgpu_ring_set_preempt_cond_exec(ring, true);
5575 return r;
5576 }
5577 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
5578 {
5579 struct amdgpu_device *adev = ring->adev;
5580 struct v10_de_ib_state de_payload = {0};
5581 uint64_t offset, gds_addr, de_payload_gpu_addr;
5582 void *de_payload_cpu_addr;
5583 int cnt;
5585 if (ring->is_mes_queue) {
5586 offset = offsetof(struct amdgpu_mes_ctx_meta_data,
5587 gfx[0].gfx_meta_data) +
5588 offsetof(struct v10_gfx_meta_data, de_payload);
5589 de_payload_gpu_addr =
5590 amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
5591 de_payload_cpu_addr =
5592 amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
5594 offset = offsetof(struct amdgpu_mes_ctx_meta_data,
5595 gfx[0].gds_backup) +
5596 offsetof(struct v10_gfx_meta_data, de_payload);
5597 gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
5598 } else {
5599 offset = offsetof(struct v10_gfx_meta_data, de_payload);
5600 de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
5601 de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
5603 gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
5604 AMDGPU_CSA_SIZE - adev->gds.gds_size,
5605 PAGE_SIZE);
5606 }
5608 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5609 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5611 cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5612 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5613 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5614 WRITE_DATA_DST_SEL(8) |
5615 WR_CONFIRM) |
5616 WRITE_DATA_CACHE_POLICY(0));
5617 amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
5618 amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
5620 if (resume)
5621 amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
5622 sizeof(de_payload) >> 2);
5623 else
5624 amdgpu_ring_write_multiple(ring, (void *)&de_payload,
5625 sizeof(de_payload) >> 2);
5626 }
5628 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
5629 bool secure)
5630 {
5631 uint32_t v = secure ? FRAME_TMZ : 0;
5633 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5634 amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
5635 }
5637 static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
5638 uint32_t reg_val_offs)
5639 {
5640 struct amdgpu_device *adev = ring->adev;
5642 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
5643 amdgpu_ring_write(ring, 0 | /* src: register*/
5644 (5 << 8) | /* dst: memory */
5645 (1 << 20)); /* write confirm */
5646 amdgpu_ring_write(ring, reg);
5647 amdgpu_ring_write(ring, 0);
5648 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
5650 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
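/*
 * Write a register from the ring.  The WRITE_DATA control dword differs per
 * ring type: gfx selects the ME engine with write confirmation, while the
 * KIQ only sets the no-increment-address bit.
 */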
static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val)
{
	uint32_t cmd = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = (1 << 16); /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{
	gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}

static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						   uint32_t reg0, uint32_t reg1,
						   uint32_t ref, uint32_t mask)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
			       ref, mask, 0x20);
}

static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
					 unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t value = 0;

	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
	WREG32_SOC15(GC, 0, regSQ_CMD, value);
}

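/*
 * EOP (end-of-pipe) interrupt enables.  The gfx helper programs
 * CP_INT_CNTL_RING0/RING1 for ME0, while the compute helper programs the
 * per-pipe CP_ME1_PIPEn_INT_CNTL registers of the first MEC.
 */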
static void
gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
				      uint32_t me, uint32_t pipe,
				      enum amdgpu_interrupt_state state)
{
	uint32_t cp_int_cntl, cp_int_cntl_reg;

	if (me == 0) {
		switch (pipe) {
		case 0:
			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
			break;
		case 1:
			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    TIME_STAMP_INT_ENABLE, 0);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    GENERIC0_INT_ENABLE, 0);
		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    TIME_STAMP_INT_ENABLE, 1);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    GENERIC0_INT_ENABLE, 1);
		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
		break;
	default:
		break;
	}
}

static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						      int me, int pipe,
						      enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */

	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
			break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
			break;
		case 2:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
			break;
		case 3:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     GENERIC0_INT_ENABLE, 0);
		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     GENERIC0_INT_ENABLE, 1);
		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}

static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
		break;
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

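/*
 * EOP interrupt handler.  For MES-managed queues the queue is looked up by
 * the id carried in src_data[0]; for legacy queues, me/pipe/queue are decoded
 * from the bitfields of the IV entry's ring_id.
 */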
static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	uint32_t mes_queue_id = entry->src_data[0];

	DRM_DEBUG("IH: CP EOP\n");

	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
		struct amdgpu_mes_queue *queue;

		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;

		spin_lock(&adev->mes.queue_id_lock);
		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
		if (queue) {
			DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
			amdgpu_fence_process(queue->ring);
		}
		spin_unlock(&adev->mes.queue_id_lock);
	} else {
		me_id = (entry->ring_id & 0x0c) >> 2;
		pipe_id = (entry->ring_id & 0x03) >> 0;
		queue_id = (entry->ring_id & 0x70) >> 4;

		switch (me_id) {
		case 0:
			if (pipe_id == 0)
				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
			else
				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
			break;
		case 1:
		case 2:
			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
				ring = &adev->gfx.compute_ring[i];
				/* Per-queue interrupt is supported for MEC starting from VI.
				 * The interrupt can only be enabled/disabled per pipe instead
				 * of per queue.
				 */
				if ((ring->me == me_id) &&
				    (ring->pipe == pipe_id) &&
				    (ring->queue == queue_id))
					amdgpu_fence_process(ring);
			}
			break;
		}
	}

	return 0;
}

static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned int type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
				      PRIV_REG_INT_ENABLE,
				      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned int type,
					       enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
				      PRIV_INSTR_INT_ENABLE,
				      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

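/*
 * On a privileged register/instruction fault, decode me/pipe/queue from the
 * IV entry and report a scheduler fault on the matching ring so the affected
 * job can be timed out and recovered.
 */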
static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
					struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			/* we only enable 1 gfx queue per pipe for now */
			if (ring->me == me_id && ring->pipe == pipe_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	default:
		BUG();
		break;
	}
}

static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v11_0_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v11_0_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq)
		return adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry);

	return -EINVAL;
}

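/*
 * The KIQ only uses the GENERIC2 interrupt; enable or disable it both in
 * CPC_INT_CNTL and in the CP_ME1_PIPEn_INT_CNTL register of the pipe the
 * KIQ ring lives on.
 */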
static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	uint32_t tmp, target;
	struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);

	target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
	target += ring->pipe;

	switch (type) {
	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
		if (state == AMDGPU_IRQ_STATE_DISABLE) {
			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);

			tmp = RREG32_SOC15_IP(GC, target);
			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15_IP(GC, target, tmp);
		} else {
			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);

			tmp = RREG32_SOC15_IP(GC, target);
			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15_IP(GC, target, tmp);
		}
		break;
	default:
		BUG(); /* kiq only support GENERIC2_INT now */
		break;
	}

	return 0;
}

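/*
 * Full memory sync: ACQUIRE_MEM with a GCR request that writes back and/or
 * invalidates every GPU cache level (GL2 WB+INV, GLM WB+INV, GL1, GLV, GLK
 * and GLI INV) over the whole address range.
 */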
static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
{
	const unsigned int gcr_cntl =
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);

	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}

static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
	.name = "gfx_v11_0",
	.early_init = gfx_v11_0_early_init,
	.late_init = gfx_v11_0_late_init,
	.sw_init = gfx_v11_0_sw_init,
	.sw_fini = gfx_v11_0_sw_fini,
	.hw_init = gfx_v11_0_hw_init,
	.hw_fini = gfx_v11_0_hw_fini,
	.suspend = gfx_v11_0_suspend,
	.resume = gfx_v11_0_resume,
	.is_idle = gfx_v11_0_is_idle,
	.wait_for_idle = gfx_v11_0_wait_for_idle,
	.soft_reset = gfx_v11_0_soft_reset,
	.check_soft_reset = gfx_v11_0_check_soft_reset,
	.post_soft_reset = gfx_v11_0_post_soft_reset,
	.set_clockgating_state = gfx_v11_0_set_clockgating_state,
	.set_powergating_state = gfx_v11_0_set_powergating_state,
	.get_clockgating_state = gfx_v11_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v11_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 242 maximum if 16 IBs */
		5 + /* COND_EXEC */
		9 + /* SET_Q_PREEMPTION_MODE */
		7 + /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* VM_FLUSH */
		8 + /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		5 + /* COND_EXEC */
		7 + /* HDP_flush */
		4 + /* VGT_flush */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		8, /* gfx_v11_0_emit_mem_sync */
	.emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v11_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v11_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
	.emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow,
	.init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v11_0_ring_emit_patch_cond_exec,
	.preempt_ib = gfx_v11_0_ring_preempt_ib,
	.emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v11_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
};

static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v11_0_ring_emit_gds_switch */
		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v11_0_ring_emit_vm_flush */
		8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
		8, /* gfx_v11_0_emit_mem_sync */
	.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
	.emit_fence = gfx_v11_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
};

static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v11_0_ring_emit_gds_switch */
		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v11_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
	.emit_fence = gfx_v11_0_ring_emit_fence_kiq,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v11_0_ring_emit_rreg,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
};

static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = {
	.set = gfx_v11_0_set_eop_interrupt_state,
	.process = gfx_v11_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = {
	.set = gfx_v11_0_set_priv_reg_fault_state,
	.process = gfx_v11_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
	.set = gfx_v11_0_set_priv_inst_fault_state,
	.process = gfx_v11_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
	.process = gfx_v11_0_rlc_gc_fed_irq,
};

static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;

	adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
	adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;
}

static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		adev->gfx.imu.mode = MISSION_MODE;
	else
		adev->gfx.imu.mode = DEBUG_MODE;

	adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs;
}

static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs;
}

static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev)
{
	unsigned total_cu = adev->gfx.config.max_cu_per_sh *
		adev->gfx.config.max_sh_per_se *
		adev->gfx.config.max_shader_engines;

	adev->gds.gds_size = 0x1000;
	adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}

static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev)
{
	/* set gfx eng mqd */
	adev->mqds[AMDGPU_HW_IP_GFX].mqd_size =
		sizeof(struct v11_gfx_mqd);
	adev->mqds[AMDGPU_HW_IP_GFX].init_mqd =
		gfx_v11_0_gfx_mqd_init;
	/* set compute eng mqd */
	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
		sizeof(struct v11_compute_mqd);
	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
		gfx_v11_0_compute_mqd_init;
}

static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
							  u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;

	WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
}

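/*
 * WGP/CU bitmap helpers: a WGP (work group processor) packs two CUs, so the
 * active CU mask is derived by expanding each active WGP bit into a pair of
 * adjacent CU bits.
 */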
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 data, wgp_bitmask;

	data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;

	wgp_bitmask =
		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);

	return (~data) & wgp_bitmask;
}

static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 wgp_idx, wgp_active_bitmap;
	u32 cu_bitmap_per_wgp, cu_active_bitmap;

	wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev);
	cu_active_bitmap = 0;

	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
		/* if one WGP is enabled, it means 2 CUs will be enabled */
		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
		if (wgp_active_bitmap & (1 << wgp_idx))
			cu_active_bitmap |= cu_bitmap_per_wgp;
	}

	return cu_active_bitmap;
}

static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap;
	unsigned disable_masks[8 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			counter = 0;
			gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			if (i < 8 && j < 2)
				gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev);

			/**
			 * GFX11 could support more than 4 SEs, while the bitmap
			 * in the cu_info struct is 4x4 and the ioctl interface
			 * struct drm_amdgpu_info_device must stay stable.
			 * So we use the last two columns of the bitmap to store
			 * the cu mask for SEs 4 to 7; the layout of the bitmap
			 * is as below:
			 * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
			 * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
			 * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
			 * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
			 * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
			 * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
			 * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
			 * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
			 */
			cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask)
					counter++;

				mask <<= 1;
			}
			active_cu_number += counter;
		}
	}
	gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v11_0_ip_funcs,
};