/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "amdgpu_atomfirmware.h"
#include "imu_v11_0.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "smuio/smuio_13_0_6_offset.h"
#include "smuio/smuio_13_0_6_sh_mask.h"
#include "navi10_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"
#define GFX11_NUM_GFX_RINGS		1
#define GFX11_MEC_HPD_SIZE		2048

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1	0x1388

#define regCGTT_WD_CLK_CTRL		0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX	1
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1		0x4e7e
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX	1
#define regPC_CONFIG_CNTL_1		0x194d
#define regPC_CONFIG_CNTL_1_BASE_IDX	1
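/* Register offsets defined locally here, presumably because they are not
 * exported by the gc_11_0_0 headers included above; each *_BASE_IDX selects
 * the instance base segment used by the SOC15 register accessor macros.
 */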
MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
};
static const struct soc15_reg_golden golden_settings_gc_11_5_0[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_DEBUG5, 0xffffffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL, 0xffffffff, 0xf37fff3f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xfffffffb, 0x00f40188),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x8000b007),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf1ffffff, 0x00880007),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPC_CONFIG_CNTL_1, 0xffffffff, 0x00010000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL2, 0x007f0000, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xffcfffff, 0x0000200a),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regUTCL1_CTRL_2, 0xffffffff, 0x0000048f)
};
#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
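/* Default SH_MEM_CONFIG: 64-bit address mode, unaligned access allowed,
 * initial instruction prefetch level 3.
 */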
static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev);
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev);
static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance, int xcc_id);
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val);
static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
				      bool enable);
static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0); /* oac mask */
	amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)

	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t me = 0, eng_sel = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_COMPUTE:

	case AMDGPU_RING_TYPE_GFX:

	case AMDGPU_RING_TYPE_MES:

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME(me) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)

	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr, u64 seq)

	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				      uint16_t pasid, uint32_t flush_type,
				      bool all_hub)

	gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);

static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx11_kiq_set_resources,
	.kiq_map_queues = gfx11_kiq_map_queues,
	.kiq_unmap_queues = gfx11_kiq_unmap_queues,
	.kiq_query_status = gfx11_kiq_query_status,
	.kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};
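/* The *_size values above are DWORD counts of the PM4 packets emitted by the
 * corresponding callbacks (PACKET3 header plus payload); e.g. SET_RESOURCES
 * is 1 header + 7 payload DWORDs = 8.
 */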
static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)

	adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs;

static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
		break;
	case IP_VERSION(11, 5, 0):
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_5_0,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_5_0));
		break;
static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					bool wc, uint32_t reg, uint32_t val)

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);

static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref, uint32_t mask,
				   uint32_t inv)

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) | /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
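	/* The final DWORD is the poll interval; with WAIT_REG_MEM_FUNCTION(3)
	 * ("equal"), the CP blocks until (*addr & mask) == ref.
	 */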
static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)

	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	uint32_t tmp = 0;
	unsigned int i;
	int r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
		gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
	} else {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
		amdgpu_ring_write(ring, scratch -
				  PACKET3_SET_UCONFIG_REG_START);
		amdgpu_ring_write(ring, 0xDEADBEEF);
	}
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)

	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned int index;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_ptr;
	long r;

	/* MES KIQ fw doesn't have indirect buffer support yet */
	if (adev->enable_mes_kiq &&
	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		return 0;

	memset(&ib, 0, sizeof(ib));

	if (ring->is_mes_queue) {
		uint32_t padding, offset;

		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
		padding = amdgpu_mes_ctx_get_offs(ring,
						  AMDGPU_MES_CTX_PADDING_OFFS);

		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);

		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
		*cpu_ptr = cpu_to_le32(0xCAFEDEAD);
	} else {
		r = amdgpu_device_wb_get(adev, &index);
		if (r)
			return r;

		gpu_addr = adev->wb.gpu_addr + (index * 4);
		adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
		cpu_ptr = &adev->wb.wb[index];

		r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
		if (r) {
			DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
			goto err1;
		}
	}

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
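	/* The five DWORDs above form a single WRITE_DATA packet: DST_SEL(5)
	 * targets memory with write-confirm, so 0xDEADBEEF replaces the
	 * 0xCAFEDEAD seeded earlier and the check below detects completion.
	 */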
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);

	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	if (!ring->is_mes_queue)
		amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	if (!ring->is_mes_queue)
		amdgpu_device_wb_free(adev, index);
	return r;
static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)

	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);

	kfree(adev->gfx.rlc.register_list_format);

static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)

	const struct psp_firmware_header_v1_0 *toc_hdr;
	char fw_name[40];
	int err = 0;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
	if (err)
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	amdgpu_ucode_release(&adev->psp.toc_fw);
	return err;
static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		if ((adev->gfx.me_fw_version >= 1505) &&
		    (adev->gfx.pfp_fw_version >= 1600) &&
		    (adev->gfx.mec_fw_version >= 512)) {
			if (amdgpu_sriov_vf(adev))
				adev->gfx.cp_gfx_shadow = true;
			else
				adev->gfx.cp_gfx_shadow = false;
		}
		break;
	default:
		adev->gfx.cp_gfx_shadow = false;
		break;
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)

	char fw_name[40];
	char ucode_prefix[30];
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
	if (err)
		goto out;
	/* check pfp fw hdr version to decide whether to enable rs64 for gfx11 */
	adev->gfx.rs64_enable = amdgpu_ucode_hdr_version(
			(union amdgpu_firmware_header *)
			adev->gfx.pfp_fw->data, 2, 0);
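	/* A v2.0 or newer PFP header indicates RS64-based CP firmware, which
	 * is shipped as separate instruction and per-pipe stack images
	 * (loaded below); older headers take the monolithic CP path.
	 */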
	if (adev->gfx.rs64_enable) {
		dev_info(adev->dev, "CP RS64 enable\n");
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
	}

	if (!amdgpu_sriov_vf(adev)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
		if (err)
			goto out;
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
		if (err)
			goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix);

	/* only one MEC for gfx 11.0.0. */
	adev->gfx.mec2_fw = NULL;

	gfx_v11_0_check_fw_cp_gfx_shadow(adev);

out:
	if (err) {
		amdgpu_ucode_release(&adev->gfx.pfp_fw);
		amdgpu_ucode_release(&adev->gfx.me_fw);
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	}

	return err;
static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)

	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;

static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)

	u32 count = 0, i;
	u32 ctx_reg_offset;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)

	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);

static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)

	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
	adev->gfx.rlc.rlcg_reg_access_supported = true;
static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)

	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx11_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);

	return 0;

static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)

	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);

static void gfx_v11_0_me_init(struct amdgpu_device *adev)

	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);

static int gfx_v11_0_mec_init(struct amdgpu_device *adev)

	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE;
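	/* One GFX11_MEC_HPD_SIZE (2 KiB) EOP buffer per acquired compute ring;
	 * gfx_v11_0_compute_ring_init() points each ring's eop_gpu_addr into
	 * this single GTT buffer object.
	 */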
	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
		gfx_v11_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, mec_hpd_size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)

	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)

	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (regno << SQ_IND_INDEX__INDEX__SHIFT) |
		     (thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		     (SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);

static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)

	/* in gfx11 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here
	 */
	WARN_ON(simd != 0);

	/* type 3 wave data */
	dst[(*no_fields)++] = 3;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);

static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)

	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);

static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)

	soc21_grbm_select(adev, me, pipe, q, vm);
/* all sizes are in bytes */
#define MQD_SHADOW_BASE_SIZE		73728
#define MQD_SHADOW_BASE_ALIGNMENT	256
#define MQD_FWWORKAREA_SIZE		484
#define MQD_FWWORKAREA_ALIGNMENT	256
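/* Shadow-area and firmware work-area sizes the CP firmware expects for gfx11
 * preemption state; reported to clients through
 * gfx_v11_0_get_gfx_shadow_info() below.
 */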
static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
					 struct amdgpu_gfx_shadow_info *shadow_info)

	if (adev->gfx.cp_gfx_shadow) {
		shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
		shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
		shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
		shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
		return 0;
	} else {
		memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
		return -ENOTSUPP;
	}

static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v11_0_select_se_sh,
	.read_wave_data = &gfx_v11_0_read_wave_data,
	.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
	.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
	.get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
};
static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 3):
		adev->gfx.ras = &gfx_v11_0_3_ras;
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300;
		break;
static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)

	int r;
	struct amdgpu_ring *ring;
	unsigned int irq_type;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;
	return 0;

static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
					int mec, int pipe, int queue)

	int r;
	unsigned int irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;

	ring = &adev->gfx.compute_ring[ring_id];

	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
			     + (ring_id * GFX11_MEC_HPD_SIZE);
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		   + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		   + ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
		  AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;

static struct {
	SOC21_FIRMWARE_ID	id;
	unsigned int		offset;
	unsigned int		size;
} rlc_autoload_info[SOC21_FIRMWARE_ID_MAX];
static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)

	RLC_TABLE_OF_CONTENT *ucode = rlc_toc;

	while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) &&
	       (ucode->id < SOC21_FIRMWARE_ID_MAX)) {
		rlc_autoload_info[ucode->id].id = ucode->id;
		rlc_autoload_info[ucode->id].offset = ucode->offset * 4;
		rlc_autoload_info[ucode->id].size = ucode->size * 4;
		ucode++;
	}
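	/* TOC entries record offset and size in DWORD units; the "* 4" above
	 * converts them to the byte values cached in rlc_autoload_info.
	 */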
static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev)

	uint32_t total_size = 0;
	SOC21_FIRMWARE_ID id;

	gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);

	for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* If the last TOC entry's offset was aligned past the summed sizes,
	 * extend the total to cover that entry as well.
	 */
	if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset)
		total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset +
			     rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].size;

	return total_size;
static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)

	int r;
	uint32_t total_size;

	total_size = gfx_v11_0_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);

	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
						       SOC21_FIRMWARE_ID id,
						       const void *fw_data,
						       uint32_t fw_size,
						       uint32_t *fw_autoload_mask)

	uint32_t toc_offset;
	uint32_t toc_fw_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;

	if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;

	if (fw_size == 0)
		fw_size = toc_fw_size;

	if (fw_size > toc_fw_size)
		fw_size = toc_fw_size;

	memcpy(ptr + toc_offset, fw_data, fw_size);

	if (fw_size < toc_fw_size)
		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);

	if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
		*(uint64_t *)fw_autoload_mask |= 1ULL << id;
static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)

	void *data;
	uint32_t size;
	uint64_t *toc_ptr;

	*(uint64_t *)fw_autoload_mask |= 0x1;

	DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask);

	data = adev->psp.toc.start_addr;
	size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size;

	toc_ptr = (uint64_t *)data + size / 8 - 1;
	*toc_ptr = *(uint64_t *)fw_autoload_mask;
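	/* The final 64-bit word of the TOC image is patched with the autoload
	 * bitmap before the TOC is itself copied into the autoload buffer.
	 */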
	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC,
						   data, size, fw_autoload_mask);
static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)

	const __le32 *fw_data;
	uint32_t fw_size;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
	uint16_t version_major, version_minor;

	if (adev->gfx.rs64_enable) {
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;

		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
				le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP,
				fw_data, fw_size, fw_autoload_mask);

		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
				le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK,
				fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK,
				fw_data, fw_size, fw_autoload_mask);

		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;

		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
				le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME,
				fw_data, fw_size, fw_autoload_mask);

		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
				le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK,
				fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK,
				fw_data, fw_size, fw_autoload_mask);

		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;

		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
				le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC,
				fw_data, fw_size, fw_autoload_mask);

		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
				le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK,
				fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK,
				fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK,
				fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK,
				fw_data, fw_size, fw_autoload_mask);
	} else {
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.pfp_fw->data;
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP,
				fw_data, fw_size, fw_autoload_mask);

		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.me_fw->data;
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME,
				fw_data, fw_size, fw_autoload_mask);

		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			cp_hdr->jt_size * 4;
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC,
				fw_data, fw_size, fw_autoload_mask);
	}

	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
		adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE,
			fw_data, fw_size, fw_autoload_mask);

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2) {
		if (version_minor >= 2) {
			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE,
					fw_data, fw_size, fw_autoload_mask);

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT,
					fw_data, fw_size, fw_autoload_mask);
		}
	}
static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev,
							    uint32_t *fw_autoload_mask)

	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v2_0 *sdma_hdr;

	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
		adev->sdma.instance[0].fw->data;
	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask);

	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->ctl_ucode_offset));
	fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, fw_autoload_mask);

static void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)

	const __le32 *fw_data;
	uint32_t fw_size;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	int pipe, ucode_id, data_id;

	for (pipe = 0; pipe < 2; pipe++) {
		if (pipe == 0) {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0;
			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK;
		} else {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1;
			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK;
		}

		mes_hdr = (const struct mes_firmware_header_v1_0 *)
			adev->mes.fw[pipe]->data;

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				ucode_id, fw_data, fw_size, fw_autoload_mask);

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				data_id, fw_data, fw_size, fw_autoload_mask);
	}
static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)

	uint32_t rlc_g_offset, rlc_g_size;
	uint64_t gpu_addr;
	uint32_t autoload_fw_id[2];

	memset(autoload_fw_id, 0, sizeof(uint32_t) * 2);
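	/* autoload_fw_id is two u32s used as a single u64 bitmap indexed by
	 * SOC21_FIRMWARE_ID; each copy helper sets the bit of every firmware
	 * it stages into the autoload buffer.
	 */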
	/* RLC autoload sequence 2: copy ucode */
	gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id);

	rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);

	/* RLC autoload sequence 3: load IMU fw */
	if (adev->gfx.imu.funcs->load_microcode)
		adev->gfx.imu.funcs->load_microcode(adev);
	/* RLC autoload sequence 4: init IMU fw */
	if (adev->gfx.imu.funcs->setup_imu)
		adev->gfx.imu.funcs->setup_imu(adev);
	if (adev->gfx.imu.funcs->start_imu)
		adev->gfx.imu.funcs->start_imu(adev);

	/* RLC autoload sequence 5: disable gpa mode */
	gfx_v11_0_disable_gpa_mode(adev);

	return 0;
static int gfx_v11_0_sw_init(void *handle)

	int i, j, k, r, ring_id = 0;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	default:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	}

	/* Enable CG flag in one-VF mode to allow RLC safe-mode enter/exit */
	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) &&
	    amdgpu_sriov_is_pp_one_vf(adev))
		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
			      &adev->gfx.rlc_gc_fed_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
	if (adev->gfx.imu.funcs) {
		if (adev->gfx.imu.funcs->init_microcode) {
			r = adev->gfx.imu.funcs->init_microcode(adev);
			if (r)
				DRM_ERROR("Failed to load imu firmware!\n");
		}
	}

	gfx_v11_0_me_init(adev);

	r = gfx_v11_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v11_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v11_0_gfx_ring_init(adev, ring_id,
							    i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}
	/* set up the compute queues - allocate horizontally across pipes */
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
								     k, j))
					continue;

				r = gfx_v11_0_compute_ring_init(adev, ring_id,
								i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}

	if (!adev->enable_mes_kiq) {
		r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		kiq = &adev->gfx.kiq[0];
		r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
		if (r)
			return r;
	}

	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd), 0);
	if (r)
		return r;

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v11_0_rlc_autoload_buffer_init(adev);
		if (r)
			return r;
	}

	r = gfx_v11_0_gpu_early_init(adev);
	if (r)
		return r;

	if (amdgpu_gfx_ras_sw_init(adev)) {
		dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
		return -EINVAL;
	}

	return 0;
static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev)

	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
			      &adev->gfx.pfp.pfp_fw_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_ptr);

	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj,
			      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);

static void gfx_v11_0_me_fini(struct amdgpu_device *adev)

	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
			      &adev->gfx.me.me_fw_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_ptr);

	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj,
			      &adev->gfx.me.me_fw_data_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_data_ptr);

static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)

	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
			      &adev->gfx.rlc.rlc_autoload_gpu_addr,
			      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
static int gfx_v11_0_sw_fini(void *handle)

	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_mqd_sw_fini(adev, 0);

	if (!adev->enable_mes_kiq) {
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
		amdgpu_gfx_kiq_fini(adev, 0);
	}

	gfx_v11_0_pfp_fini(adev);
	gfx_v11_0_me_fini(adev);
	gfx_v11_0_rlc_fini(adev);
	gfx_v11_0_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v11_0_rlc_autoload_buffer_fini(adev);

	gfx_v11_0_free_microcode(adev);

	return 0;
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance, int xcc_id)

	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev)

	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;

	gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE);
	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
					    CC_GC_SA_UNIT_DISABLE,
					    SA_DISABLE);
	gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE);
	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
						 GC_USER_SA_UNIT_DISABLE,
						 SA_DISABLE);
	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
					    adev->gfx.config.max_shader_engines);

	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
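/* An SA (shader array) is active unless fused off (CC_GC_SA_UNIT_DISABLE) or
 * user-disabled (GC_USER_SA_UNIT_DISABLE);
 * gfx_v11_0_get_rb_active_bitmap() below applies the same scheme to RBs.
 */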
static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)

	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
	u32 rb_mask;

	gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
					    CC_RB_BACKEND_DISABLE,
					    BACKEND_DISABLE);
	gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
						 GC_USER_RB_BACKEND_DISABLE,
						 BACKEND_DISABLE);
	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
					    adev->gfx.config.max_shader_engines);

	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)

	u32 rb_bitmap_width_per_sa;
	u32 max_sa;
	u32 active_sa_bitmap;
	u32 global_active_rb_bitmap;
	u32 active_rb_bitmap = 0;
	u32 i;

	/* query sa bitmap from SA_UNIT_DISABLE registers */
	active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev);
	/* query rb bitmap from RB_BACKEND_DISABLE registers */
	global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev);

	/* generate active rb bitmap according to active sa bitmap */
	max_sa = adev->gfx.config.max_shader_engines *
		 adev->gfx.config.max_sh_per_se;
	rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
				 adev->gfx.config.max_sh_per_se;
	for (i = 0; i < max_sa; i++) {
		if (active_sa_bitmap & (1 << i))
			active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
	}
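	/* The 0x3 literal marks two RBs per active SA, i.e. it assumes
	 * rb_bitmap_width_per_sa == 2 on these parts.
	 */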
	active_rb_bitmap |= global_active_rb_bitmap;
	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define LDS_APP_BASE		0x1
#define SCRATCH_APP_BASE	0x2

static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)

	int i;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:		0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:	0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:	0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
			SCRATCH_APP_BASE;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc21_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data);
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs.
	 */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0);
	}
static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev)

	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < 16; vmid++) {
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0);
	}

static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev)

	/* TODO: harvest feature to be added later. */

static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev)

	/* TCCs are global (not instanced). */
	uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) |
			       RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE);

	adev->gfx.config.tcc_disabled_mask =
		REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
		(REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
static void gfx_v11_0_constants_init(struct amdgpu_device *adev)

	u32 tmp;
	int i;

	if (!amdgpu_sriov_vf(adev))
		WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v11_0_setup_rb(adev);
	gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info);
	gfx_v11_0_get_tcc_info(adev);
	adev->gfx.config.pa_sc_tile_steering_override = 0;

	/* Set whether texture coordinate truncation is conformant. */
	tmp = RREG32_SOC15(GC, 0, regTA_CNTL2);
	adev->gfx.config.ta_cntl2_truncate_coord_mode =
		REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc21_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);

		tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
				    (adev->gmc.private_aperture_start >> 48));
		tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
				    (adev->gmc.shared_aperture_start >> 48));
		WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp);
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v11_0_init_compute_vmid(adev);
	gfx_v11_0_init_gds_vmid(adev);
static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						bool enable)

	u32 tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
			    enable ? 1 : 0);

	WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
static int gfx_v11_0_init_csb(struct amdgpu_device *adev)

	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);

	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI,
		     adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO,
		     adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);

	return 0;

static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)

	u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);

	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
	WREG32_SOC15(GC, 0, regRLC_CNTL, tmp);

static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev)

	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
1816 static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
1819 uint32_t rlc_pg_cntl;
1821 rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
1824 /* RLC_PG_CNTL[23] = 0 (default)
1825 * RLC will wait for handshake acks with SMU
1826 * GFXOFF will be enabled
1827 * RLC_PG_CNTL[23] = 1
1828 * RLC will not issue any message to SMU
1829 * hence no handshake between SMU & RLC
1830 * GFXOFF will be disabled
1832 rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
1834 rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
1835 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl);
1838 static void gfx_v11_0_rlc_start(struct amdgpu_device *adev)
1840	/* TODO: enable the RLC & SMU handshake once the SMU
1841	 * and GFXOFF features work as expected */
1842 if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
1843 gfx_v11_0_rlc_smu_handshake_cntl(adev, false);
1845 WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
1849 static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev)
1853 /* enable Save Restore Machine */
1854 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL));
1855 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1856 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1857 WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp);
1860 static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev)
1862 const struct rlc_firmware_header_v2_0 *hdr;
1863 const __le32 *fw_data;
1864 unsigned i, fw_size;
1866 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1867 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1868 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1869 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1871 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
1872 RLCG_UCODE_LOADING_START_ADDRESS);
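	/* Stream the ucode image one dword at a time; the ADDR register
	 * auto-increments on each DATA write.
	 */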
1874 for (i = 0; i < fw_size; i++)
1875 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA,
1876 le32_to_cpup(fw_data++));
1878 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
1881 static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev)
1883 const struct rlc_firmware_header_v2_2 *hdr;
1884 const __le32 *fw_data;
1885 unsigned i, fw_size;
1888 hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1890 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1891 le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
1892 fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;
1894 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0);
1896 for (i = 0; i < fw_size; i++) {
1897 if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1899 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA,
1900 le32_to_cpup(fw_data++));
1903 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1905 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1906 le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
1907 fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;
1909 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0);
1910 for (i = 0; i < fw_size; i++) {
1911 if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1913 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA,
1914 le32_to_cpup(fw_data++));
1917 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
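	/* With both IRAM and DRAM images loaded, take the LX6 core out of
	 * reset (BRESET = 0) and enable its debug path.
	 */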
1919 tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL);
1920 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
1921 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
1922 WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp);
1925 static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev)
1927 const struct rlc_firmware_header_v2_3 *hdr;
1928 const __le32 *fw_data;
1929 unsigned i, fw_size;
1932 hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
1934 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1935 le32_to_cpu(hdr->rlcp_ucode_offset_bytes));
1936 fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4;
1938 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0);
1940 for (i = 0; i < fw_size; i++) {
1941 if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1943 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA,
1944 le32_to_cpup(fw_data++));
1947 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version);
1949 tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE);
1950 tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
1951 WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp);
1953 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1954 le32_to_cpu(hdr->rlcv_ucode_offset_bytes));
1955 fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4;
1957 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0);
1959 for (i = 0; i < fw_size; i++) {
1960 if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1962 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA,
1963 le32_to_cpup(fw_data++));
1966 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version);
1968 tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL);
1969 tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1);
1970 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp);
1973 static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev)
1975 const struct rlc_firmware_header_v2_0 *hdr;
1976 uint16_t version_major;
1977 uint16_t version_minor;
1979 if (!adev->gfx.rlc_fw)
1982 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1983 amdgpu_ucode_print_rlc_hdr(&hdr->header);
1985 version_major = le16_to_cpu(hdr->header.header_version_major);
1986 version_minor = le16_to_cpu(hdr->header.header_version_minor);
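	/* For v2.x headers the minor revision advertises which optional
	 * sections are present: IRAM/DRAM images from v2.2, RLCP/RLCV
	 * from v2.3.
	 */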
1988 if (version_major == 2) {
1989 gfx_v11_0_load_rlcg_microcode(adev);
1990 if (amdgpu_dpm == 1) {
1991 if (version_minor >= 2)
1992 gfx_v11_0_load_rlc_iram_dram_microcode(adev);
1993 if (version_minor == 3)
1994 gfx_v11_0_load_rlcp_rlcv_microcode(adev);
2003 static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev)
2007 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2008 gfx_v11_0_init_csb(adev);
2010 if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
2011 gfx_v11_0_rlc_enable_srm(adev);
2013 if (amdgpu_sriov_vf(adev)) {
2014 gfx_v11_0_init_csb(adev);
2018 adev->gfx.rlc.funcs->stop(adev);
2021 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);
2024 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0);
2026 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2027 /* legacy rlc firmware loading */
2028 r = gfx_v11_0_rlc_load_microcode(adev);
2033 gfx_v11_0_init_csb(adev);
2035 adev->gfx.rlc.funcs->start(adev);
2040 static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
2042 uint32_t usec_timeout = 50000; /* wait for 50ms */
2046 /* Trigger an invalidation of the L1 instruction caches */
2047 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2048 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2049 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2051 /* Wait for invalidation complete */
2052 for (i = 0; i < usec_timeout; i++) {
2053 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2054 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2055 INVALIDATE_CACHE_COMPLETE))
2060 if (i >= usec_timeout) {
2061 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2065 if (amdgpu_emu_mode == 1)
2066 adev->hdp.funcs->flush_hdp(adev, NULL);
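	/* Map the ME instruction cache to VMID0 with execution enabled and
	 * address clamping on.
	 */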
2068 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2069 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2070 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2071 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2072 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2073 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2075	/* Program me ucode address into instruction cache address register */
2076 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2077 lower_32_bits(addr) & 0xFFFFF000);
2078 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2079 upper_32_bits(addr));
2084 static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
2086 uint32_t usec_timeout = 50000; /* wait for 50ms */
2090 /* Trigger an invalidation of the L1 instruction caches */
2091 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2092 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2093 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2095 /* Wait for invalidation complete */
2096 for (i = 0; i < usec_timeout; i++) {
2097 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2098 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2099 INVALIDATE_CACHE_COMPLETE))
2104 if (i >= usec_timeout) {
2105 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2109 if (amdgpu_emu_mode == 1)
2110 adev->hdp.funcs->flush_hdp(adev, NULL);
2112 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2113 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2114 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2115 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2116 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2117 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2119	/* Program pfp ucode address into instruction cache address register */
2120 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2121 lower_32_bits(addr) & 0xFFFFF000);
2122 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2123 upper_32_bits(addr));
2128 static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
2130 uint32_t usec_timeout = 50000; /* wait for 50ms */
2134 /* Trigger an invalidation of the L1 instruction caches */
2135 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2136 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2138 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2140 /* Wait for invalidation complete */
2141 for (i = 0; i < usec_timeout; i++) {
2142 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2143 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2144 INVALIDATE_CACHE_COMPLETE))
2149 if (i >= usec_timeout) {
2150 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2154 if (amdgpu_emu_mode == 1)
2155 adev->hdp.funcs->flush_hdp(adev, NULL);
2157 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2158 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2159 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2160 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2161 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2163	/* Program mec1 ucode address into instruction cache address register */
2164 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO,
2165 lower_32_bits(addr) & 0xFFFFF000);
2166 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2167 upper_32_bits(addr));
2172 static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2174 uint32_t usec_timeout = 50000; /* wait for 50ms */
2176 unsigned i, pipe_id;
2177 const struct gfx_firmware_header_v2_0 *pfp_hdr;
2179 pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2180 adev->gfx.pfp_fw->data;
2182 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2183 lower_32_bits(addr));
2184 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2185 upper_32_bits(addr));
2187 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2188 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2189 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2190 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2191 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2194 * Programming any of the CP_PFP_IC_BASE registers
2195 * forces invalidation of the ME L1 I$. Wait for the
2196	 * invalidation to complete.
2198 for (i = 0; i < usec_timeout; i++) {
2199 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2200 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2201 INVALIDATE_CACHE_COMPLETE))
2206 if (i >= usec_timeout) {
2207 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2211 /* Prime the L1 instruction caches */
2212 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2213 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
2214 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2215	/* Wait for the cache to be primed */
2216 for (i = 0; i < usec_timeout; i++) {
2217 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2218 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2224 if (i >= usec_timeout) {
2225 dev_err(adev->dev, "failed to prime instruction cache\n");
2229 mutex_lock(&adev->srbm_mutex);
2230 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2231 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2232 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2233 (pfp_hdr->ucode_start_addr_hi << 30) |
2234 (pfp_hdr->ucode_start_addr_lo >> 2));
2235 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2236 pfp_hdr->ucode_start_addr_hi >> 2);
2239	 * Program CP_ME_CNTL to reset the given pipe so that
2240	 * CP_PFP_PRGRM_CNTR_START takes effect.
2242 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2244 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2245 PFP_PIPE0_RESET, 1);
2247 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2248 PFP_PIPE1_RESET, 1);
2249 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2251	/* Clear the pfp pipe reset bits. */
2253 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2254 PFP_PIPE0_RESET, 0);
2256 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2257 PFP_PIPE1_RESET, 0);
2258 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2260 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
2261 lower_32_bits(addr2));
2262 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
2263 upper_32_bits(addr2));
2265 soc21_grbm_select(adev, 0, 0, 0, 0);
2266 mutex_unlock(&adev->srbm_mutex);
2268 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2269 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2270 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2271 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2273 /* Invalidate the data caches */
2274 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2275 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2276 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2278 for (i = 0; i < usec_timeout; i++) {
2279 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2280 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2281 INVALIDATE_DCACHE_COMPLETE))
2286 if (i >= usec_timeout) {
2287 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2294 static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2296 uint32_t usec_timeout = 50000; /* wait for 50ms */
2298 unsigned i, pipe_id;
2299 const struct gfx_firmware_header_v2_0 *me_hdr;
2301 me_hdr = (const struct gfx_firmware_header_v2_0 *)
2302 adev->gfx.me_fw->data;
2304 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2305 lower_32_bits(addr));
2306 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2307 upper_32_bits(addr));
2309 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2310 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2311 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2312 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2313 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2316 * Programming any of the CP_ME_IC_BASE registers
2317 * forces invalidation of the ME L1 I$. Wait for the
2318	 * invalidation to complete.
2320 for (i = 0; i < usec_timeout; i++) {
2321 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2322 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2323 INVALIDATE_CACHE_COMPLETE))
2328 if (i >= usec_timeout) {
2329 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2333 /* Prime the instruction caches */
2334 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2335 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
2336 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2338	/* Wait for the instruction cache to be primed */
2339 for (i = 0; i < usec_timeout; i++) {
2340 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2341 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2347 if (i >= usec_timeout) {
2348 dev_err(adev->dev, "failed to prime instruction cache\n");
2352 mutex_lock(&adev->srbm_mutex);
2353 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2354 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2355 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2356 (me_hdr->ucode_start_addr_hi << 30) |
2357			(me_hdr->ucode_start_addr_lo >> 2));
2358		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2359			me_hdr->ucode_start_addr_hi >> 2);
2362	 * Program CP_ME_CNTL to reset the given pipe so that
2363	 * CP_ME_PRGRM_CNTR_START takes effect.
2365 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2367 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2370 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2372 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2374	/* Clear the me pipe reset bits. */
2376 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2379 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2381 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2383 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
2384 lower_32_bits(addr2));
2385 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
2386 upper_32_bits(addr2));
2388 soc21_grbm_select(adev, 0, 0, 0, 0);
2389 mutex_unlock(&adev->srbm_mutex);
2391 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2392 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2393 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2394 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2396 /* Invalidate the data caches */
2397 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2398 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2399 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2401 for (i = 0; i < usec_timeout; i++) {
2402 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2403 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2404 INVALIDATE_DCACHE_COMPLETE))
2409 if (i >= usec_timeout) {
2410 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2417 static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2419 uint32_t usec_timeout = 50000; /* wait for 50ms */
2422 const struct gfx_firmware_header_v2_0 *mec_hdr;
2424 mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2425 adev->gfx.mec_fw->data;
2427 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2428 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2429 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2430 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2431 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2433 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
2434 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
2435 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
2436 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
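	/* Program the per-pipe data base, ucode entry point and I$ base
	 * while each MEC pipe is selected via GRBM.
	 */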
2438 mutex_lock(&adev->srbm_mutex);
2439 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2440 soc21_grbm_select(adev, 1, i, 0, 0);
2442 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, addr2);
2443 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
2444 upper_32_bits(addr2));
2446 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2447 mec_hdr->ucode_start_addr_lo >> 2 |
2448 mec_hdr->ucode_start_addr_hi << 30);
2449 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2450 mec_hdr->ucode_start_addr_hi >> 2);
2452 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, addr);
2453 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2454 upper_32_bits(addr));
2456	soc21_grbm_select(adev, 0, 0, 0, 0);
2457	mutex_unlock(&adev->srbm_mutex);
2459	/* Trigger an invalidation of the MEC L1 data cache */
2460 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2461 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2462 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
2464 /* Wait for invalidation complete */
2465 for (i = 0; i < usec_timeout; i++) {
2466 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2467 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
2468 INVALIDATE_DCACHE_COMPLETE))
2473 if (i >= usec_timeout) {
2474		dev_err(adev->dev, "failed to invalidate MEC data cache\n");
2478 /* Trigger an invalidation of the L1 instruction caches */
2479 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2480 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2481 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2483 /* Wait for invalidation complete */
2484 for (i = 0; i < usec_timeout; i++) {
2485 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2486 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2487 INVALIDATE_CACHE_COMPLETE))
2492 if (i >= usec_timeout) {
2493 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2500 static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev)
2502 const struct gfx_firmware_header_v2_0 *pfp_hdr;
2503 const struct gfx_firmware_header_v2_0 *me_hdr;
2504 const struct gfx_firmware_header_v2_0 *mec_hdr;
2505 uint32_t pipe_id, tmp;
2507 mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2508 adev->gfx.mec_fw->data;
2509 me_hdr = (const struct gfx_firmware_header_v2_0 *)
2510 adev->gfx.me_fw->data;
2511 pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2512 adev->gfx.pfp_fw->data;
2514 /* config pfp program start addr */
2515 for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2516 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2517 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2518 (pfp_hdr->ucode_start_addr_hi << 30) |
2519 (pfp_hdr->ucode_start_addr_lo >> 2));
2520 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2521 pfp_hdr->ucode_start_addr_hi >> 2);
2523 soc21_grbm_select(adev, 0, 0, 0, 0);
2525 /* reset pfp pipe */
2526 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2527 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1);
2528 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1);
2529 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2531 /* clear pfp pipe reset */
2532 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0);
2533 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0);
2534 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2536 /* config me program start addr */
2537 for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2538 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2539 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2540 (me_hdr->ucode_start_addr_hi << 30) |
2541			(me_hdr->ucode_start_addr_lo >> 2));
2542		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2543			me_hdr->ucode_start_addr_hi >> 2);
2545 soc21_grbm_select(adev, 0, 0, 0, 0);
2548 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2549 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1);
2550 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1);
2551 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2553 /* clear me pipe reset */
2554 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0);
2555 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0);
2556 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2558 /* config mec program start addr */
2559 for (pipe_id = 0; pipe_id < 4; pipe_id++) {
2560 soc21_grbm_select(adev, 1, pipe_id, 0, 0);
2561 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2562 mec_hdr->ucode_start_addr_lo >> 2 |
2563 mec_hdr->ucode_start_addr_hi << 30);
2564 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2565 mec_hdr->ucode_start_addr_hi >> 2);
2567 soc21_grbm_select(adev, 0, 0, 0, 0);
2569 /* reset mec pipe */
2570 tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
2571 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
2572 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
2573 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
2574 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
2575 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
2577 /* clear mec pipe reset */
2578 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
2579 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
2580 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
2581 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
2582 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
2585 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
2588 uint32_t bootload_status;
2590 uint64_t addr, addr2;
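	/* Poll until the RLC reports BOOTLOAD_COMPLETE and the CP is idle;
	 * GC 11.0.1/11.0.4/11.5.0 expose the bootload status at a
	 * different register offset.
	 */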
2592 for (i = 0; i < adev->usec_timeout; i++) {
2593 cp_status = RREG32_SOC15(GC, 0, regCP_STAT);
2595 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 1) ||
2596 adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 4) ||
2597 adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 5, 0))
2598 bootload_status = RREG32_SOC15(GC, 0,
2599 regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
2601 bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS);
2603 if ((cp_status == 0) &&
2604 (REG_GET_FIELD(bootload_status,
2605 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
2611 if (i >= adev->usec_timeout) {
2612 dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
2616 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2617 if (adev->gfx.rs64_enable) {
2618 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2619 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset;
2620 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2621 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset;
2622 r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2);
2625 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2626 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset;
2627 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2628 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset;
2629 r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2);
2632 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2633 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset;
2634 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2635 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset;
2636 r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2);
2640 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2641 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset;
2642 r = gfx_v11_0_config_me_cache(adev, addr);
2645 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2646 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset;
2647 r = gfx_v11_0_config_pfp_cache(adev, addr);
2650 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2651 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset;
2652 r = gfx_v11_0_config_mec_cache(adev, addr);
2661 static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2664 u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2666 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2667 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2668 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
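	/* Wait for the CP to go idle after toggling the halt bits. */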
2670 for (i = 0; i < adev->usec_timeout; i++) {
2671 if (RREG32_SOC15(GC, 0, regCP_STAT) == 0)
2676 if (i >= adev->usec_timeout)
2677 DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
2682 static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
2685 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2686 const __le32 *fw_data;
2687 unsigned i, fw_size;
2689 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2690 adev->gfx.pfp_fw->data;
2692 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2694 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2695 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2696 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);
2698 r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
2699 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2700 &adev->gfx.pfp.pfp_fw_obj,
2701 &adev->gfx.pfp.pfp_fw_gpu_addr,
2702 (void **)&adev->gfx.pfp.pfp_fw_ptr);
2704 dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
2705 gfx_v11_0_pfp_fini(adev);
2709 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);
2711 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
2712 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
2714 gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr);
2716 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0);
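	/* Backdoor-write the pfp jump table through the HYP ucode port;
	 * the address register auto-increments on each DATA write.
	 */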
2718 for (i = 0; i < pfp_hdr->jt_size; i++)
2719 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA,
2720 le32_to_cpup(fw_data + pfp_hdr->jt_offset + i));
2722 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2727 static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
2730 const struct gfx_firmware_header_v2_0 *pfp_hdr;
2731 const __le32 *fw_ucode, *fw_data;
2732 unsigned i, pipe_id, fw_ucode_size, fw_data_size;
2734 uint32_t usec_timeout = 50000; /* wait for 50ms */
2736 pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2737 adev->gfx.pfp_fw->data;
2739 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2742 fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data +
2743 le32_to_cpu(pfp_hdr->ucode_offset_bytes));
2744 fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes);
2746 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2747 le32_to_cpu(pfp_hdr->data_offset_bytes));
2748 fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes);
2751 r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
2753 AMDGPU_GEM_DOMAIN_VRAM |
2754 AMDGPU_GEM_DOMAIN_GTT,
2755 &adev->gfx.pfp.pfp_fw_obj,
2756 &adev->gfx.pfp.pfp_fw_gpu_addr,
2757 (void **)&adev->gfx.pfp.pfp_fw_ptr);
2759 dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r);
2760 gfx_v11_0_pfp_fini(adev);
2764 r = amdgpu_bo_create_reserved(adev, fw_data_size,
2766 AMDGPU_GEM_DOMAIN_VRAM |
2767 AMDGPU_GEM_DOMAIN_GTT,
2768 &adev->gfx.pfp.pfp_fw_data_obj,
2769 &adev->gfx.pfp.pfp_fw_data_gpu_addr,
2770 (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
2772 dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r);
2773 gfx_v11_0_pfp_fini(adev);
2777 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size);
2778 memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size);
2780 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
2781 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj);
2782 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
2783 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
2785 if (amdgpu_emu_mode == 1)
2786 adev->hdp.funcs->flush_hdp(adev, NULL);
2788 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2789 lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
2790 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2791 upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
2793 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2794 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2795 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2796 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2797 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2800 * Programming any of the CP_PFP_IC_BASE registers
2801 * forces invalidation of the ME L1 I$. Wait for the
2802	 * invalidation to complete.
2804 for (i = 0; i < usec_timeout; i++) {
2805 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2806 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2807 INVALIDATE_CACHE_COMPLETE))
2812 if (i >= usec_timeout) {
2813 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2817 /* Prime the L1 instruction caches */
2818 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2819 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
2820 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2821	/* Wait for the cache to be primed */
2822 for (i = 0; i < usec_timeout; i++) {
2823 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2824 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2830 if (i >= usec_timeout) {
2831 dev_err(adev->dev, "failed to prime instruction cache\n");
2835 mutex_lock(&adev->srbm_mutex);
2836 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2837 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2838 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2839 (pfp_hdr->ucode_start_addr_hi << 30) |
2840			(pfp_hdr->ucode_start_addr_lo >> 2));
2841		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2842			pfp_hdr->ucode_start_addr_hi >> 2);
2845	 * Program CP_ME_CNTL to reset the given pipe so that
2846	 * CP_PFP_PRGRM_CNTR_START takes effect.
2848 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2850 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2851 PFP_PIPE0_RESET, 1);
2853 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2854 PFP_PIPE1_RESET, 1);
2855 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2857	/* Clear the pfp pipe reset bits. */
2859 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2860 PFP_PIPE0_RESET, 0);
2862 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2863 PFP_PIPE1_RESET, 0);
2864 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2866 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
2867 lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
2868 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
2869 upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
2871 soc21_grbm_select(adev, 0, 0, 0, 0);
2872 mutex_unlock(&adev->srbm_mutex);
2874 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2875 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2876 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2877 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2879 /* Invalidate the data caches */
2880 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2881 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2882 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2884 for (i = 0; i < usec_timeout; i++) {
2885 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2886 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2887 INVALIDATE_DCACHE_COMPLETE))
2892 if (i >= usec_timeout) {
2893 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2900 static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
2903 const struct gfx_firmware_header_v1_0 *me_hdr;
2904 const __le32 *fw_data;
2905 unsigned i, fw_size;
2907 me_hdr = (const struct gfx_firmware_header_v1_0 *)
2908 adev->gfx.me_fw->data;
2910 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2912 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2913 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2914 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);
2916 r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
2917 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2918 &adev->gfx.me.me_fw_obj,
2919 &adev->gfx.me.me_fw_gpu_addr,
2920 (void **)&adev->gfx.me.me_fw_ptr);
2922 dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
2923 gfx_v11_0_me_fini(adev);
2927 memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);
2929 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
2930 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
2932 gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr);
2934 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0);
2936 for (i = 0; i < me_hdr->jt_size; i++)
2937 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA,
2938 le32_to_cpup(fw_data + me_hdr->jt_offset + i));
2940 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version);
2945 static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
2948 const struct gfx_firmware_header_v2_0 *me_hdr;
2949 const __le32 *fw_ucode, *fw_data;
2950 unsigned i, pipe_id, fw_ucode_size, fw_data_size;
2952 uint32_t usec_timeout = 50000; /* wait for 50ms */
2954 me_hdr = (const struct gfx_firmware_header_v2_0 *)
2955 adev->gfx.me_fw->data;
2957 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2960 fw_ucode = (const __le32 *)(adev->gfx.me_fw->data +
2961 le32_to_cpu(me_hdr->ucode_offset_bytes));
2962 fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes);
2964 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2965 le32_to_cpu(me_hdr->data_offset_bytes));
2966 fw_data_size = le32_to_cpu(me_hdr->data_size_bytes);
2969 r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
2971 AMDGPU_GEM_DOMAIN_VRAM |
2972 AMDGPU_GEM_DOMAIN_GTT,
2973 &adev->gfx.me.me_fw_obj,
2974 &adev->gfx.me.me_fw_gpu_addr,
2975 (void **)&adev->gfx.me.me_fw_ptr);
2977 dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r);
2978 gfx_v11_0_me_fini(adev);
2982 r = amdgpu_bo_create_reserved(adev, fw_data_size,
2984 AMDGPU_GEM_DOMAIN_VRAM |
2985 AMDGPU_GEM_DOMAIN_GTT,
2986 &adev->gfx.me.me_fw_data_obj,
2987 &adev->gfx.me.me_fw_data_gpu_addr,
2988 (void **)&adev->gfx.me.me_fw_data_ptr);
2990 dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
2991		gfx_v11_0_me_fini(adev);
2995 memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size);
2996 memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size);
2998 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
2999 amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj);
3000 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
3001 amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
3003 if (amdgpu_emu_mode == 1)
3004 adev->hdp.funcs->flush_hdp(adev, NULL);
3006 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
3007 lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
3008 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
3009 upper_32_bits(adev->gfx.me.me_fw_gpu_addr));
3011 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
3012 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
3013 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
3014 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
3015 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
3018 * Programming any of the CP_ME_IC_BASE registers
3019 * forces invalidation of the ME L1 I$. Wait for the
3020	 * invalidation to complete.
3022 for (i = 0; i < usec_timeout; i++) {
3023 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3024 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
3025 INVALIDATE_CACHE_COMPLETE))
3030 if (i >= usec_timeout) {
3031 dev_err(adev->dev, "failed to invalidate instruction cache\n");
3035 /* Prime the instruction caches */
3036 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3037 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
3038 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
3040	/* Wait for the instruction cache to be primed */
3041 for (i = 0; i < usec_timeout; i++) {
3042 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3043 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
3049 if (i >= usec_timeout) {
3050 dev_err(adev->dev, "failed to prime instruction cache\n");
3054 mutex_lock(&adev->srbm_mutex);
3055 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
3056 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
3057 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
3058 (me_hdr->ucode_start_addr_hi << 30) |
3059			(me_hdr->ucode_start_addr_lo >> 2));
3060		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
3061			me_hdr->ucode_start_addr_hi >> 2);
3064	 * Program CP_ME_CNTL to reset the given pipe so that
3065	 * CP_ME_PRGRM_CNTR_START takes effect.
3067 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
3069 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3072 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3074 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3076	/* Clear the me pipe reset bits. */
3078 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3081 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3083 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3085 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
3086 lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3087 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
3088 upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3090 soc21_grbm_select(adev, 0, 0, 0, 0);
3091 mutex_unlock(&adev->srbm_mutex);
3093 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
3094 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
3095 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
3096 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
3098 /* Invalidate the data caches */
3099 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3100 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3101 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
3103 for (i = 0; i < usec_timeout; i++) {
3104 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3105 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
3106 INVALIDATE_DCACHE_COMPLETE))
3111 if (i >= usec_timeout) {
3112 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
3119 static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3123 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw)
3126 gfx_v11_0_cp_gfx_enable(adev, false);
3128 if (adev->gfx.rs64_enable)
3129 r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev);
3131 r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev);
3133 dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
3137 if (adev->gfx.rs64_enable)
3138 r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev);
3140 r = gfx_v11_0_cp_gfx_load_me_microcode(adev);
3142 dev_err(adev->dev, "(%d) failed to load me fw\n", r);
3149 static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev)
3151 struct amdgpu_ring *ring;
3152 const struct cs_section_def *sect = NULL;
3153 const struct cs_extent_def *ext = NULL;
3158 WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT,
3159 adev->gfx.config.max_hw_contexts - 1);
3160 WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1);
3162 if (!amdgpu_async_gfx_ring)
3163 gfx_v11_0_cp_gfx_enable(adev, true);
3165 ring = &adev->gfx.gfx_ring[0];
3166 r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev));
3168 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3172 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3173 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
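	/* PACKET3_CONTEXT_CONTROL: 0x80000000 in each payload dword sets
	 * the update-enable bit for the load and shadow control masks.
	 */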
3175 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3176 amdgpu_ring_write(ring, 0x80000000);
3177 amdgpu_ring_write(ring, 0x80000000);
3179 for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
3180 for (ext = sect->section; ext->extent != NULL; ++ext) {
3181 if (sect->id == SECT_CONTEXT) {
3182 amdgpu_ring_write(ring,
3183 PACKET3(PACKET3_SET_CONTEXT_REG,
3185 amdgpu_ring_write(ring, ext->reg_index -
3186 PACKET3_SET_CONTEXT_REG_START);
3187 for (i = 0; i < ext->reg_count; i++)
3188 amdgpu_ring_write(ring, ext->extent[i]);
3194 SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
3195 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
3196 amdgpu_ring_write(ring, ctx_reg_offset);
3197 amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);
3199 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3200 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3202 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3203 amdgpu_ring_write(ring, 0);
3205 amdgpu_ring_commit(ring);
3207 /* submit cs packet to copy state 0 to next available state */
3208 if (adev->gfx.num_gfx_rings > 1) {
3209		/* at most 2 gfx rings are supported */
3210 ring = &adev->gfx.gfx_ring[1];
3211 r = amdgpu_ring_alloc(ring, 2);
3213 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3217 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3218 amdgpu_ring_write(ring, 0);
3220 amdgpu_ring_commit(ring);
3225 static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
3230 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
3231 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);
3233 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
3236 static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
3237 struct amdgpu_ring *ring)
3241 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
3242 if (ring->use_doorbell) {
3243 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3244 DOORBELL_OFFSET, ring->doorbell_index);
3245 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3248 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3251 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp);
3253 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3254 DOORBELL_RANGE_LOWER, ring->doorbell_index);
3255 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp);
3257 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
3258 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3261 static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
3263 struct amdgpu_ring *ring;
3266 u64 rb_addr, rptr_addr, wptr_gpu_addr;
3268 /* Set the write pointer delay */
3269 WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);
3271 /* set the RB to use vmid 0 */
3272 WREG32_SOC15(GC, 0, regCP_RB_VMID, 0);
3274 /* Init gfx ring 0 for pipe 0 */
3275 mutex_lock(&adev->srbm_mutex);
3276 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3278 /* Set ring buffer size */
3279 ring = &adev->gfx.gfx_ring[0];
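	/* ring_size is in bytes; RB_BUFSZ is log2 of the size in 8-byte
	 * units, and RB_BLKSZ appears to use a granularity four times
	 * larger (hence the -2).
	 */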
3280 rb_bufsz = order_base_2(ring->ring_size / 8);
3281 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3282 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3283 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3285 /* Initialize the ring buffer's write pointers */
3287 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
3288 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3290	/* set the wb address whether it's enabled or not */
3291 rptr_addr = ring->rptr_gpu_addr;
3292 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3293 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3294 CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3296 wptr_gpu_addr = ring->wptr_gpu_addr;
3297 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3298 lower_32_bits(wptr_gpu_addr));
3299 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3300 upper_32_bits(wptr_gpu_addr));
3303 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3305 rb_addr = ring->gpu_addr >> 8;
3306 WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr);
3307 WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3309 WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1);
3311 gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3312 mutex_unlock(&adev->srbm_mutex);
3314 /* Init gfx ring 1 for pipe 1 */
3315 if (adev->gfx.num_gfx_rings > 1) {
3316 mutex_lock(&adev->srbm_mutex);
3317 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
3318		/* at most 2 gfx rings are supported */
3319 ring = &adev->gfx.gfx_ring[1];
3320 rb_bufsz = order_base_2(ring->ring_size / 8);
3321 tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
3322 tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
3323 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3324 /* Initialize the ring buffer's write pointers */
3326 WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr));
3327 WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
3328		/* Set the wb address whether it's enabled or not */
3329 rptr_addr = ring->rptr_gpu_addr;
3330 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
3331 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3332 CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3333 wptr_gpu_addr = ring->wptr_gpu_addr;
3334 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3335 lower_32_bits(wptr_gpu_addr));
3336 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3337 upper_32_bits(wptr_gpu_addr));
3340 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3342 rb_addr = ring->gpu_addr >> 8;
3343 WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr);
3344 WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr));
3345 WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1);
3347 gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3348 mutex_unlock(&adev->srbm_mutex);
3350 /* Switch to pipe 0 */
3351 mutex_lock(&adev->srbm_mutex);
3352 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3353 mutex_unlock(&adev->srbm_mutex);
3355 /* start the ring */
3356 gfx_v11_0_cp_gfx_start(adev);
3361 static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3365 if (adev->gfx.rs64_enable) {
3366 data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
3367 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
3369 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
3371 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
3373 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
3375 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
3377 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
3379 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
3381 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
3383 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
3385 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
3387 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data);
3389 data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL);
3392 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0);
3393 if (!adev->enable_mes_kiq)
3394 data = REG_SET_FIELD(data, CP_MEC_CNTL,
3397 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1);
3398 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1);
3400 WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data);
3406 static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3408 const struct gfx_firmware_header_v1_0 *mec_hdr;
3409 const __le32 *fw_data;
3410 unsigned i, fw_size;
3414 if (!adev->gfx.mec_fw)
3417 gfx_v11_0_cp_compute_enable(adev, false);
3419 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3420 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3422 fw_data = (const __le32 *)
3423 (adev->gfx.mec_fw->data +
3424 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3425 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
3427 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
3428 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3429 &adev->gfx.mec.mec_fw_obj,
3430 &adev->gfx.mec.mec_fw_gpu_addr,
3433 dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
3434 gfx_v11_0_mec_fini(adev);
3438 memcpy(fw, fw_data, fw_size);
3440 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3441 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3443 gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr);
3446 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0);
3448 for (i = 0; i < mec_hdr->jt_size; i++)
3449 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA,
3450 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3452 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
3457 static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
3459 const struct gfx_firmware_header_v2_0 *mec_hdr;
3460 const __le32 *fw_ucode, *fw_data;
3461 u32 tmp, fw_ucode_size, fw_data_size;
3462 u32 i, usec_timeout = 50000; /* Wait for 50 ms */
3463 u32 *fw_ucode_ptr, *fw_data_ptr;
3466 if (!adev->gfx.mec_fw)
3469 gfx_v11_0_cp_compute_enable(adev, false);
3471 mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
3472 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3474 fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
3475 le32_to_cpu(mec_hdr->ucode_offset_bytes));
3476 fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);
3478 fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
3479 le32_to_cpu(mec_hdr->data_offset_bytes));
3480 fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
3482 r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3484 AMDGPU_GEM_DOMAIN_VRAM |
3485 AMDGPU_GEM_DOMAIN_GTT,
3486 &adev->gfx.mec.mec_fw_obj,
3487 &adev->gfx.mec.mec_fw_gpu_addr,
3488 (void **)&fw_ucode_ptr);
3490 dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
3491 gfx_v11_0_mec_fini(adev);
3495 r = amdgpu_bo_create_reserved(adev, fw_data_size,
3497 AMDGPU_GEM_DOMAIN_VRAM |
3498 AMDGPU_GEM_DOMAIN_GTT,
3499 &adev->gfx.mec.mec_fw_data_obj,
3500 &adev->gfx.mec.mec_fw_data_gpu_addr,
3501 (void **)&fw_data_ptr);
3503		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
3504 gfx_v11_0_mec_fini(adev);
3508 memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
3509 memcpy(fw_data_ptr, fw_data, fw_data_size);
3511 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3512 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
3513 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3514 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);
3516 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
3517 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3518 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
3519 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3520 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
3522 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
3523 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
3524 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
3525 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
3527 mutex_lock(&adev->srbm_mutex);
3528 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3529 soc21_grbm_select(adev, 1, i, 0, 0);
3531 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, adev->gfx.mec.mec_fw_data_gpu_addr);
3532 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
3533 upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));
3535 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
3536 mec_hdr->ucode_start_addr_lo >> 2 |
3537 mec_hdr->ucode_start_addr_hi << 30);
3538 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
3539 mec_hdr->ucode_start_addr_hi >> 2);
3541 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr);
3542 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
3543 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3545	soc21_grbm_select(adev, 0, 0, 0, 0);
3546	mutex_unlock(&adev->srbm_mutex);
3548	/* Trigger an invalidation of the MEC L1 data cache */
3549 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
3550 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3551 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
3553 /* Wait for invalidation complete */
3554 for (i = 0; i < usec_timeout; i++) {
3555 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
3556 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
3557 INVALIDATE_DCACHE_COMPLETE))
3562 if (i >= usec_timeout) {
3563		dev_err(adev->dev, "failed to invalidate MEC data cache\n");
3567 /* Trigger an invalidation of the L1 instruction caches */
3568 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
3569 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
3570 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
3572 /* Wait for invalidation complete */
3573 for (i = 0; i < usec_timeout; i++) {
3574 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
3575 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
3576 INVALIDATE_CACHE_COMPLETE))
3581 if (i >= usec_timeout) {
3582 dev_err(adev->dev, "failed to invalidate instruction cache\n");
3589 static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring)
3592 struct amdgpu_device *adev = ring->adev;
3594	/* tell the RLC which queue is the KIQ */
3595 tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
3597 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3598 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
3600 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
3603 static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev)
3605 /* set graphics engine doorbell range */
3606 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER,
3607 (adev->doorbell_index.gfx_ring0 * 2) << 2);
3608 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
3609 (adev->doorbell_index.gfx_userqueue_end * 2) << 2);
3611 /* set compute engine doorbell range */
3612 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
3613 (adev->doorbell_index.kiq * 2) << 2);
3614 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
3615 (adev->doorbell_index.userqueue_end * 2) << 2);
3618 static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
3619 struct amdgpu_mqd_prop *prop)
3621 struct v11_gfx_mqd *mqd = m;
3622 uint64_t hqd_gpu_addr, wb_gpu_addr;
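	/* The MQD is an in-memory image of the CP_GFX_HQD_* registers; the
	 * CP loads it when the queue is mapped, so fill the struct rather
	 * than writing the registers directly.
	 */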
3626 /* set up gfx hqd wptr */
3627 mqd->cp_gfx_hqd_wptr = 0;
3628 mqd->cp_gfx_hqd_wptr_hi = 0;
3630 /* set the pointer to the MQD */
3631 mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc;
3632 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
3634 /* set up mqd control */
3635 tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
3636 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
3637 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
3638 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
3639 mqd->cp_gfx_mqd_control = tmp;
3641	/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
3642 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
3643 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
3644 mqd->cp_gfx_hqd_vmid = 0;
3646 /* set up default queue priority level
3647 * 0x0 = low priority, 0x1 = high priority */
3648 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
3649 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
3650 mqd->cp_gfx_hqd_queue_priority = tmp;
3652 /* set up time quantum */
3653 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
3654 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
3655 mqd->cp_gfx_hqd_quantum = tmp;
3657	/* set up gfx hqd base. this is similar to CP_RB_BASE */
3658 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
3659 mqd->cp_gfx_hqd_base = hqd_gpu_addr;
3660 mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);
3662 /* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */
3663 wb_gpu_addr = prop->rptr_gpu_addr;
3664 mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
3665 mqd->cp_gfx_hqd_rptr_addr_hi =
3666 upper_32_bits(wb_gpu_addr) & 0xffff;
3668 /* set up rb_wptr_poll addr */
3669 wb_gpu_addr = prop->wptr_gpu_addr;
3670 mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3671 mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
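/*
 * RB_BUFSZ below is a log2 encoding, analogous to the EOP_SIZE encoding
 * noted in the compute MQD path: the CP treats the ring as
 * 2^(RB_BUFSZ + 1) dwords.  For a hypothetical 64 KiB ring,
 * 65536 / 4 = 16384 dwords and order_base_2(16384) - 1 = 13,
 * which decodes back to 2^14 = 16384 dwords.
 */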
3673 /* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
3674 rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
3675 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
3676 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
3677 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
3678 #ifdef __BIG_ENDIAN
3679 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
3680 #endif
3681 mqd->cp_gfx_hqd_cntl = tmp;
3683 /* set up cp_doorbell_control */
3684 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
3685 if (prop->use_doorbell) {
3686 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3687 DOORBELL_OFFSET, prop->doorbell_index);
3688 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3689 DOORBELL_EN, 1);
3690 } else
3691 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3692 DOORBELL_EN, 0);
3693 mqd->cp_rb_doorbell_control = tmp;
3695 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3696 mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);
3698 /* activate the queue */
3699 mqd->cp_gfx_hqd_active = 1;
3701 return 0;
3702 }
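/*
 * First-time init builds a fresh MQD under the SRBM mutex and stashes a
 * backup copy; on GPU reset or resume the backup is restored instead, so
 * the queue comes back with exactly its original configuration.
 */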
3704 static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
3705 {
3706 struct amdgpu_device *adev = ring->adev;
3707 struct v11_gfx_mqd *mqd = ring->mqd_ptr;
3708 int mqd_idx = ring - &adev->gfx.gfx_ring[0];
3710 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
3711 memset((void *)mqd, 0, sizeof(*mqd));
3712 mutex_lock(&adev->srbm_mutex);
3713 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3714 amdgpu_ring_init_mqd(ring);
3715 soc21_grbm_select(adev, 0, 0, 0, 0);
3716 mutex_unlock(&adev->srbm_mutex);
3717 if (adev->gfx.me.mqd_backup[mqd_idx])
3718 memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3719 } else {
3720 /* restore mqd with the backup copy */
3721 if (adev->gfx.me.mqd_backup[mqd_idx])
3722 memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
3723 /* reset the ring */
3724 ring->wptr = 0;
3725 *ring->wptr_cpu_addr = 0;
3726 amdgpu_ring_clear_ring(ring);
3727 }
3729 return 0;
3730 }
3732 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
3733 {
3734 int r, i;
3735 struct amdgpu_ring *ring;
3737 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3738 ring = &adev->gfx.gfx_ring[i];
3740 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3741 if (unlikely(r != 0))
3742 return r;
3744 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3745 if (!r) {
3746 r = gfx_v11_0_gfx_init_queue(ring);
3747 amdgpu_bo_kunmap(ring->mqd_obj);
3748 ring->mqd_ptr = NULL;
3749 }
3750 amdgpu_bo_unreserve(ring->mqd_obj);
3751 if (r)
3752 return r;
3753 }
3755 r = amdgpu_gfx_enable_kgq(adev, 0);
3756 if (r)
3757 return r;
3759 return gfx_v11_0_cp_gfx_start(adev);
3760 }
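/*
 * The compute MQD carries a few fields the gfx variant does not: the
 * header magic (0xC0310800), per-SE static thread management masks, and
 * the EOP (end of pipe) buffer used for completion events.
 */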
3762 static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
3763 struct amdgpu_mqd_prop *prop)
3764 {
3765 struct v11_compute_mqd *mqd = m;
3766 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3767 uint32_t tmp;
3769 mqd->header = 0xC0310800;
3770 mqd->compute_pipelinestat_enable = 0x00000001;
3771 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3772 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3773 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3774 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3775 mqd->compute_misc_reserved = 0x00000007;
3777 eop_base_addr = prop->eop_gpu_addr >> 8;
3778 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3779 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3781 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3782 tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
3783 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3784 (order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));
3786 mqd->cp_hqd_eop_control = tmp;
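/*
 * Worked example of the encoding above: GFX11_MEC_HPD_SIZE is 2048 bytes,
 * i.e. 512 dwords, so EOP_SIZE = order_base_2(512) - 1 = 8 and the
 * hardware decodes 2^(8+1) = 512 dwords, matching the allocation.
 */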
3788 /* enable doorbell? */
3789 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
3791 if (prop->use_doorbell) {
3792 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3793 DOORBELL_OFFSET, prop->doorbell_index);
3794 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3795 DOORBELL_EN, 1);
3796 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3797 DOORBELL_SOURCE, 0);
3798 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3799 DOORBELL_HIT, 0);
3800 } else {
3801 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3802 DOORBELL_EN, 0);
3803 }
3805 mqd->cp_hqd_pq_doorbell_control = tmp;
3807 /* disable the queue if it's active */
3808 mqd->cp_hqd_dequeue_request = 0;
3809 mqd->cp_hqd_pq_rptr = 0;
3810 mqd->cp_hqd_pq_wptr_lo = 0;
3811 mqd->cp_hqd_pq_wptr_hi = 0;
3813 /* set the pointer to the MQD */
3814 mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
3815 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
3817 /* set MQD vmid to 0 */
3818 tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
3819 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3820 mqd->cp_mqd_control = tmp;
3822 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3823 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
3824 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3825 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3827 /* set up the HQD, this is similar to CP_RB0_CNTL */
3828 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
3829 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3830 (order_base_2(prop->queue_size / 4) - 1));
3831 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3832 (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
3833 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3834 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
3835 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3836 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3837 mqd->cp_hqd_pq_control = tmp;
3839 /* set the wb address whether it's enabled or not */
3840 wb_gpu_addr = prop->rptr_gpu_addr;
3841 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3842 mqd->cp_hqd_pq_rptr_report_addr_hi =
3843 upper_32_bits(wb_gpu_addr) & 0xffff;
3845 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3846 wb_gpu_addr = prop->wptr_gpu_addr;
3847 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3848 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3850 tmp = 0;
3851 /* enable the doorbell if requested */
3852 if (prop->use_doorbell) {
3853 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
3854 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3855 DOORBELL_OFFSET, prop->doorbell_index);
3857 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3858 DOORBELL_EN, 1);
3859 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3860 DOORBELL_SOURCE, 0);
3861 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3862 DOORBELL_HIT, 0);
3863 }
3865 mqd->cp_hqd_pq_doorbell_control = tmp;
3867 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3868 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
3870 /* set the vmid for the queue */
3871 mqd->cp_hqd_vmid = 0;
3873 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
3874 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
3875 mqd->cp_hqd_persistent_state = tmp;
3877 /* set MIN_IB_AVAIL_SIZE */
3878 tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
3879 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3880 mqd->cp_hqd_ib_control = tmp;
3882 /* set static priority for a compute queue/ring */
3883 mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
3884 mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
3886 mqd->cp_hqd_active = prop->hqd_active;
3888 return 0;
3889 }
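/*
 * Unlike the MQD-only paths above, the KIQ cannot be mapped through
 * itself, so its queue state is programmed directly over MMIO: each
 * mqd->cp_hqd_* value is written to the matching CP_HQD_* register while
 * the caller has the SRBM pointed at the KIQ's me/pipe/queue.
 */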
3891 static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring)
3892 {
3893 struct amdgpu_device *adev = ring->adev;
3894 struct v11_compute_mqd *mqd = ring->mqd_ptr;
3895 int j;
3897 /* inactivate the queue */
3898 if (amdgpu_sriov_vf(adev))
3899 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0);
3901 /* disable wptr polling */
3902 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3904 /* write the EOP addr */
3905 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR,
3906 mqd->cp_hqd_eop_base_addr_lo);
3907 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI,
3908 mqd->cp_hqd_eop_base_addr_hi);
3910 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3911 WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL,
3912 mqd->cp_hqd_eop_control);
3914 /* enable doorbell? */
3915 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
3916 mqd->cp_hqd_pq_doorbell_control);
3918 /* disable the queue if it's active */
3919 if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
3920 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
3921 for (j = 0; j < adev->usec_timeout; j++) {
3922 if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
3923 break;
3924 udelay(1);
3925 }
3926 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST,
3927 mqd->cp_hqd_dequeue_request);
3928 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR,
3929 mqd->cp_hqd_pq_rptr);
3930 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
3931 mqd->cp_hqd_pq_wptr_lo);
3932 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
3933 mqd->cp_hqd_pq_wptr_hi);
3934 }
3936 /* set the pointer to the MQD */
3937 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR,
3938 mqd->cp_mqd_base_addr_lo);
3939 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI,
3940 mqd->cp_mqd_base_addr_hi);
3942 /* set MQD vmid to 0 */
3943 WREG32_SOC15(GC, 0, regCP_MQD_CONTROL,
3944 mqd->cp_mqd_control);
3946 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3947 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE,
3948 mqd->cp_hqd_pq_base_lo);
3949 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI,
3950 mqd->cp_hqd_pq_base_hi);
3952 /* set up the HQD, this is similar to CP_RB0_CNTL */
3953 WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL,
3954 mqd->cp_hqd_pq_control);
3956 /* set the wb address whether it's enabled or not */
3957 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
3958 mqd->cp_hqd_pq_rptr_report_addr_lo);
3959 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3960 mqd->cp_hqd_pq_rptr_report_addr_hi);
3962 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3963 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
3964 mqd->cp_hqd_pq_wptr_poll_addr_lo);
3965 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3966 mqd->cp_hqd_pq_wptr_poll_addr_hi);
3968 /* enable the doorbell if requested */
3969 if (ring->use_doorbell) {
3970 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
3971 (adev->doorbell_index.kiq * 2) << 2);
3972 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
3973 (adev->doorbell_index.userqueue_end * 2) << 2);
3974 }
3976 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
3977 mqd->cp_hqd_pq_doorbell_control);
3979 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3980 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
3981 mqd->cp_hqd_pq_wptr_lo);
3982 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
3983 mqd->cp_hqd_pq_wptr_hi);
3985 /* set the vmid for the queue */
3986 WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid);
3988 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE,
3989 mqd->cp_hqd_persistent_state);
3991 /* activate the queue */
3992 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE,
3993 mqd->cp_hqd_active);
3995 if (ring->use_doorbell)
3996 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3998 return 0;
3999 }
4001 static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
4002 {
4003 struct amdgpu_device *adev = ring->adev;
4004 struct v11_compute_mqd *mqd = ring->mqd_ptr;
4006 gfx_v11_0_kiq_setting(ring);
4008 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
4009 /* reset MQD to a clean status */
4010 if (adev->gfx.kiq[0].mqd_backup)
4011 memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
4013 /* reset ring buffer */
4014 ring->wptr = 0;
4015 amdgpu_ring_clear_ring(ring);
4017 mutex_lock(&adev->srbm_mutex);
4018 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4019 gfx_v11_0_kiq_init_register(ring);
4020 soc21_grbm_select(adev, 0, 0, 0, 0);
4021 mutex_unlock(&adev->srbm_mutex);
4022 } else {
4023 memset((void *)mqd, 0, sizeof(*mqd));
4024 if (amdgpu_sriov_vf(adev) && adev->in_suspend)
4025 amdgpu_ring_clear_ring(ring);
4026 mutex_lock(&adev->srbm_mutex);
4027 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4028 amdgpu_ring_init_mqd(ring);
4029 gfx_v11_0_kiq_init_register(ring);
4030 soc21_grbm_select(adev, 0, 0, 0, 0);
4031 mutex_unlock(&adev->srbm_mutex);
4033 if (adev->gfx.kiq[0].mqd_backup)
4034 memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
4035 }
4037 return 0;
4038 }
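/*
 * Kernel compute queues (KCQs) only get their MQDs prepared here; the
 * actual mapping onto hardware is requested later through the KIQ via
 * amdgpu_gfx_enable_kcq().
 */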
4040 static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
4041 {
4042 struct amdgpu_device *adev = ring->adev;
4043 struct v11_compute_mqd *mqd = ring->mqd_ptr;
4044 int mqd_idx = ring - &adev->gfx.compute_ring[0];
4046 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4047 memset((void *)mqd, 0, sizeof(*mqd));
4048 mutex_lock(&adev->srbm_mutex);
4049 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4050 amdgpu_ring_init_mqd(ring);
4051 soc21_grbm_select(adev, 0, 0, 0, 0);
4052 mutex_unlock(&adev->srbm_mutex);
4054 if (adev->gfx.mec.mqd_backup[mqd_idx])
4055 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
4056 } else {
4057 /* restore MQD to a clean status */
4058 if (adev->gfx.mec.mqd_backup[mqd_idx])
4059 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
4060 /* reset ring buffer */
4061 ring->wptr = 0;
4062 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
4063 amdgpu_ring_clear_ring(ring);
4064 }
4066 return 0;
4067 }
4069 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
4070 {
4071 struct amdgpu_ring *ring;
4072 int r;
4074 ring = &adev->gfx.kiq[0].ring;
4076 r = amdgpu_bo_reserve(ring->mqd_obj, false);
4077 if (unlikely(r != 0))
4078 return r;
4080 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
4081 if (unlikely(r != 0)) {
4082 amdgpu_bo_unreserve(ring->mqd_obj);
4083 return r;
4084 }
4086 gfx_v11_0_kiq_init_queue(ring);
4087 amdgpu_bo_kunmap(ring->mqd_obj);
4088 ring->mqd_ptr = NULL;
4089 amdgpu_bo_unreserve(ring->mqd_obj);
4090 ring->sched.ready = true;
4091 return 0;
4092 }
4094 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
4095 {
4096 struct amdgpu_ring *ring = NULL;
4097 int r = 0, i;
4099 if (!amdgpu_async_gfx_ring)
4100 gfx_v11_0_cp_compute_enable(adev, true);
4102 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4103 ring = &adev->gfx.compute_ring[i];
4105 r = amdgpu_bo_reserve(ring->mqd_obj, false);
4106 if (unlikely(r != 0))
4107 goto done;
4108 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
4109 if (!r) {
4110 r = gfx_v11_0_kcq_init_queue(ring);
4111 amdgpu_bo_kunmap(ring->mqd_obj);
4112 ring->mqd_ptr = NULL;
4113 }
4114 amdgpu_bo_unreserve(ring->mqd_obj);
4115 if (r)
4116 goto done;
4117 }
4119 r = amdgpu_gfx_enable_kcq(adev, 0);
4120 done:
4121 return r;
4122 }
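/*
 * Bring-up order matters below: microcode first (when legacy loading is
 * used), then doorbell ranges, then the KIQ (or MES KIQ) before the
 * kernel gfx/compute queues, since those queues are mapped through it.
 */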
4124 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
4125 {
4126 int r, i;
4127 struct amdgpu_ring *ring;
4129 if (!(adev->flags & AMD_IS_APU))
4130 gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4132 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4133 /* legacy firmware loading */
4134 r = gfx_v11_0_cp_gfx_load_microcode(adev);
4135 if (r)
4136 return r;
4138 if (adev->gfx.rs64_enable)
4139 r = gfx_v11_0_cp_compute_load_microcode_rs64(adev);
4140 else
4141 r = gfx_v11_0_cp_compute_load_microcode(adev);
4142 if (r)
4143 return r;
4144 }
4146 gfx_v11_0_cp_set_doorbell_range(adev);
4148 if (amdgpu_async_gfx_ring) {
4149 gfx_v11_0_cp_compute_enable(adev, true);
4150 gfx_v11_0_cp_gfx_enable(adev, true);
4151 }
4153 if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
4154 r = amdgpu_mes_kiq_hw_init(adev);
4155 else
4156 r = gfx_v11_0_kiq_resume(adev);
4157 if (r)
4158 return r;
4160 r = gfx_v11_0_kcq_resume(adev);
4161 if (r)
4162 return r;
4164 if (!amdgpu_async_gfx_ring) {
4165 r = gfx_v11_0_cp_gfx_resume(adev);
4166 if (r)
4167 return r;
4168 } else {
4169 r = gfx_v11_0_cp_async_gfx_ring_resume(adev);
4170 if (r)
4171 return r;
4172 }
4174 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4175 ring = &adev->gfx.gfx_ring[i];
4176 r = amdgpu_ring_test_helper(ring);
4177 if (r)
4178 return r;
4179 }
4181 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4182 ring = &adev->gfx.compute_ring[i];
4183 r = amdgpu_ring_test_helper(ring);
4184 if (r)
4185 return r;
4186 }
4188 return 0;
4189 }
4191 static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable)
4192 {
4193 gfx_v11_0_cp_gfx_enable(adev, enable);
4194 gfx_v11_0_cp_compute_enable(adev, enable);
4195 }
4197 static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
4198 {
4199 int r;
4200 bool value;
4202 r = adev->gfxhub.funcs->gart_enable(adev);
4203 if (r)
4204 return r;
4206 adev->hdp.funcs->flush_hdp(adev, NULL);
4208 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
4209 false : true;
4211 adev->gfxhub.funcs->set_fault_enable_default(adev, value);
4212 amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
4214 return 0;
4215 }
4217 static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev)
4218 {
4219 uint32_t tmp;
4222 if (adev->gfx.rs64_enable) {
4223 tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL);
4224 tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1);
4225 WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp);
4227 tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL);
4228 tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1);
4229 WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp);
4230 }
4232 if (amdgpu_emu_mode == 1)
4233 msleep(100);
4234 }
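/*
 * GB_ADDR_CONFIG packs several log2-encoded topology fields; each is
 * expanded below with 1 << field.  The pipe interleave additionally
 * carries a +8 bias, so a field value of 0 decodes to 1 << 8 = 256 bytes.
 */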
4236 static int get_gb_addr_config(struct amdgpu_device *adev)
4237 {
4238 u32 gb_addr_config;
4240 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
4241 if (gb_addr_config == 0)
4242 return -EINVAL;
4244 adev->gfx.config.gb_addr_config_fields.num_pkrs =
4245 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
4247 adev->gfx.config.gb_addr_config = gb_addr_config;
4249 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
4250 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4251 GB_ADDR_CONFIG, NUM_PIPES);
4253 adev->gfx.config.max_tile_pipes =
4254 adev->gfx.config.gb_addr_config_fields.num_pipes;
4256 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
4257 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4258 GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
4259 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
4260 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4261 GB_ADDR_CONFIG, NUM_RB_PER_SE);
4262 adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
4263 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4264 GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
4265 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
4266 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4267 GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
4269 return 0;
4270 }
4272 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev)
4273 {
4274 uint32_t data;
4276 data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG);
4277 data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
4278 WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data);
4280 data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG);
4281 data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
4282 WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
4283 }
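/*
 * hw_init sequencing differs per firmware load type (PSP, direct, or RLC
 * backdoor autoload), but all paths converge on gfx_v11_0_cp_resume() at
 * the end.
 */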
4285 static int gfx_v11_0_hw_init(void *handle)
4286 {
4287 int r;
4288 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4290 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
4291 if (adev->gfx.imu.funcs) {
4292 /* RLC autoload sequence 1: Program rlc ram */
4293 if (adev->gfx.imu.funcs->program_rlc_ram)
4294 adev->gfx.imu.funcs->program_rlc_ram(adev);
4295 }
4296 /* rlc autoload firmware */
4297 r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
4298 if (r)
4299 return r;
4300 } else {
4301 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4302 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
4303 if (adev->gfx.imu.funcs->load_microcode)
4304 adev->gfx.imu.funcs->load_microcode(adev);
4305 if (adev->gfx.imu.funcs->setup_imu)
4306 adev->gfx.imu.funcs->setup_imu(adev);
4307 if (adev->gfx.imu.funcs->start_imu)
4308 adev->gfx.imu.funcs->start_imu(adev);
4309 }
4311 /* disable gpa mode in backdoor loading */
4312 gfx_v11_0_disable_gpa_mode(adev);
4313 }
4314 }
4316 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
4317 (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
4318 r = gfx_v11_0_wait_for_rlc_autoload_complete(adev);
4319 if (r) {
4320 dev_err(adev->dev, "(%d) failed to wait for rlc autoload complete\n", r);
4321 return r;
4322 }
4323 }
4325 adev->gfx.is_poweron = true;
4327 if (get_gb_addr_config(adev))
4328 DRM_WARN("Invalid gb_addr_config!\n");
4330 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
4331 adev->gfx.rs64_enable)
4332 gfx_v11_0_config_gfx_rs64(adev);
4334 r = gfx_v11_0_gfxhub_enable(adev);
4335 if (r)
4336 return r;
4338 if (!amdgpu_emu_mode)
4339 gfx_v11_0_init_golden_registers(adev);
4341 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
4342 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
4343 /*
4344 * For gfx 11, RLC firmware loading depends on the SMU firmware
4345 * having been loaded first, so with direct loading the SMC ucode
4346 * has to be loaded here before the RLC.
4347 */
4348 if (!(adev->flags & AMD_IS_APU)) {
4349 r = amdgpu_pm_load_smu_firmware(adev, NULL);
4350 if (r)
4351 return r;
4352 }
4353 }
4355 gfx_v11_0_constants_init(adev);
4357 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
4358 gfx_v11_0_select_cp_fw_arch(adev);
4360 if (adev->nbio.funcs->gc_doorbell_init)
4361 adev->nbio.funcs->gc_doorbell_init(adev);
4363 r = gfx_v11_0_rlc_resume(adev);
4364 if (r)
4365 return r;
4367 /*
4368 * init golden registers and rlc resume may override some registers,
4369 * reconfig them here
4370 */
4371 gfx_v11_0_tcp_harvest(adev);
4373 r = gfx_v11_0_cp_resume(adev);
4374 if (r)
4375 return r;
4377 return 0;
4378 }
4380 static int gfx_v11_0_hw_fini(void *handle)
4381 {
4382 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4384 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4385 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4387 if (!adev->no_hw_access) {
4388 if (amdgpu_async_gfx_ring) {
4389 if (amdgpu_gfx_disable_kgq(adev, 0))
4390 DRM_ERROR("KGQ disable failed\n");
4391 }
4393 if (amdgpu_gfx_disable_kcq(adev, 0))
4394 DRM_ERROR("KCQ disable failed\n");
4396 amdgpu_mes_kiq_hw_fini(adev);
4397 }
4399 if (amdgpu_sriov_vf(adev))
4400 /* Remove the steps disabling CPG and clearing KIQ position,
4401 * so that CP could perform IDLE-SAVE during switch. Those
4402 * steps are necessary to avoid a DMAR error in gfx9 but it is
4403 * not reproduced on gfx11.
4404 */
4405 return 0;
4407 gfx_v11_0_cp_enable(adev, false);
4408 gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4410 adev->gfxhub.funcs->gart_disable(adev);
4412 adev->gfx.is_poweron = false;
4414 return 0;
4415 }
4417 static int gfx_v11_0_suspend(void *handle)
4418 {
4419 return gfx_v11_0_hw_fini(handle);
4420 }
4422 static int gfx_v11_0_resume(void *handle)
4423 {
4424 return gfx_v11_0_hw_init(handle);
4425 }
4427 static bool gfx_v11_0_is_idle(void *handle)
4428 {
4429 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4431 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
4432 GRBM_STATUS, GUI_ACTIVE))
4433 return false;
4434 else
4435 return true;
4436 }
4438 static int gfx_v11_0_wait_for_idle(void *handle)
4439 {
4440 unsigned i;
4441 u32 tmp;
4442 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4444 for (i = 0; i < adev->usec_timeout; i++) {
4445 /* read GRBM_STATUS */
4446 tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) &
4447 GRBM_STATUS__GUI_ACTIVE_MASK;
4449 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
4450 return 0;
4451 udelay(1);
4452 }
4453 return -ETIMEDOUT;
4454 }
4456 static int gfx_v11_0_soft_reset(void *handle)
4457 {
4458 u32 grbm_soft_reset = 0;
4459 u32 tmp;
4460 int i, j, k;
4461 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4463 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4464 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
4465 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
4466 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0);
4467 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
4468 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
4470 gfx_v11_0_set_safe_mode(adev, 0);
4472 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
4473 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
4474 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
4475 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
4476 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
4477 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
4478 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
4479 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
4481 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
4482 WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
4483 }
4484 }
4485 }
4486 for (i = 0; i < adev->gfx.me.num_me; ++i) {
4487 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
4488 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
4489 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
4490 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
4491 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
4492 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
4493 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
4495 WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
4496 }
4497 }
4498 }
4500 WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);
4502 /* Read the CP_VMID_RESET register three times
4503 * to give GFX_HQD_ACTIVE sufficient time to reach 0 */
4504 RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4505 RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4506 RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4508 for (i = 0; i < adev->usec_timeout; i++) {
4509 if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
4510 !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
4511 break;
4512 udelay(1);
4513 }
4514 if (i >= adev->usec_timeout) {
4515 dev_err(adev->dev, "failed to wait for all pipes to become idle\n");
4516 return -EINVAL;
4517 }
4519 /********** trigger soft reset ***********/
4520 grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
4521 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4522 SOFT_RESET_CP, 1);
4523 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4524 SOFT_RESET_GFX, 1);
4525 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4526 SOFT_RESET_CPF, 1);
4527 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4528 SOFT_RESET_CPC, 1);
4529 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4530 SOFT_RESET_CPG, 1);
4531 WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
4532 /********** exit soft reset ***********/
4533 grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
4534 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4535 SOFT_RESET_CP, 0);
4536 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4537 SOFT_RESET_GFX, 0);
4538 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4539 SOFT_RESET_CPF, 0);
4540 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4541 SOFT_RESET_CPC, 0);
4542 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4543 SOFT_RESET_CPG, 0);
4544 WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
4546 tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL);
4547 tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1);
4548 WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp);
4550 WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0);
4551 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0);
4553 for (i = 0; i < adev->usec_timeout; i++) {
4554 if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET))
4555 break;
4556 udelay(1);
4557 }
4558 if (i >= adev->usec_timeout) {
4559 dev_err(adev->dev, "failed to wait for CP_VMID_RESET to clear\n");
4560 return -EINVAL;
4561 }
4563 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4564 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
4565 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
4566 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
4567 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
4568 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
4570 gfx_v11_0_unset_safe_mode(adev, 0);
4572 return gfx_v11_0_cp_resume(adev);
4575 static bool gfx_v11_0_check_soft_reset(void *handle)
4576 {
4577 int i, r;
4578 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4579 struct amdgpu_ring *ring;
4580 long tmo = msecs_to_jiffies(1000);
4582 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4583 ring = &adev->gfx.gfx_ring[i];
4584 r = amdgpu_ring_test_ib(ring, tmo);
4585 if (r)
4586 return true;
4587 }
4589 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4590 ring = &adev->gfx.compute_ring[i];
4591 r = amdgpu_ring_test_ib(ring, tmo);
4592 if (r)
4593 return true;
4594 }
4596 return false;
4597 }
4599 static int gfx_v11_0_post_soft_reset(void *handle)
4600 {
4601 /* GFX soft reset also disturbs MES, so MES needs to be
4602 * resumed after a GFX soft reset.
4603 */
4604 return amdgpu_mes_resume((struct amdgpu_device *)handle);
4605 }
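/*
 * The 64-bit counters below are read as two 32-bit halves, so the high
 * word is sampled before and after the low word; if it changed, the low
 * word wrapped mid-read and is sampled again.
 */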
4607 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4608 {
4609 uint64_t clock;
4610 uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
4612 if (amdgpu_sriov_vf(adev)) {
4613 amdgpu_gfx_off_ctrl(adev, false);
4614 mutex_lock(&adev->gfx.gpu_clock_mutex);
4615 clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
4616 clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
4617 clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
4618 if (clock_counter_hi_pre != clock_counter_hi_after)
4619 clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
4620 mutex_unlock(&adev->gfx.gpu_clock_mutex);
4621 amdgpu_gfx_off_ctrl(adev, true);
4622 } else {
4623 preempt_disable();
4624 clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
4625 clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
4626 clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
4627 if (clock_counter_hi_pre != clock_counter_hi_after)
4628 clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
4629 preempt_enable();
4630 }
4631 clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
4632 return clock;
4633 }
4636 static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4638 uint32_t gds_base, uint32_t gds_size,
4639 uint32_t gws_base, uint32_t gws_size,
4640 uint32_t oa_base, uint32_t oa_size)
4641 {
4642 struct amdgpu_device *adev = ring->adev;
4645 gfx_v11_0_write_data_to_reg(ring, 0, false,
4646 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid,
4647 gds_base);
4650 gfx_v11_0_write_data_to_reg(ring, 0, false,
4651 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid,
4652 gds_size);
4655 gfx_v11_0_write_data_to_reg(ring, 0, false,
4656 SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid,
4657 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4660 gfx_v11_0_write_data_to_reg(ring, 0, false,
4661 SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid,
4662 (1 << (oa_size + oa_base)) - (1 << oa_base));
4663 }
4665 static int gfx_v11_0_early_init(void *handle)
4666 {
4667 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4669 adev->gfx.funcs = &gfx_v11_0_gfx_funcs;
4671 adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
4672 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4673 AMDGPU_MAX_COMPUTE_RINGS);
4675 gfx_v11_0_set_kiq_pm4_funcs(adev);
4676 gfx_v11_0_set_ring_funcs(adev);
4677 gfx_v11_0_set_irq_funcs(adev);
4678 gfx_v11_0_set_gds_init(adev);
4679 gfx_v11_0_set_rlc_funcs(adev);
4680 gfx_v11_0_set_mqd_funcs(adev);
4681 gfx_v11_0_set_imu_funcs(adev);
4683 gfx_v11_0_init_rlcg_reg_access_ctrl(adev);
4685 return gfx_v11_0_init_microcode(adev);
4688 static int gfx_v11_0_late_init(void *handle)
4689 {
4690 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4691 int r;
4693 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4694 if (r)
4695 return r;
4697 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4698 if (r)
4699 return r;
4701 return 0;
4702 }
4704 static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev)
4705 {
4706 uint32_t rlc_cntl;
4708 /* if RLC is not enabled, do nothing */
4709 rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL);
4710 return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
4711 }
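/*
 * Safe mode is a handshake with the RLC: the CMD bit is written together
 * with a message code, and the RLC clears CMD once the GPU is quiescent,
 * making it safe to touch clock/power gating state.
 */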
4713 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
4714 {
4715 uint32_t data;
4716 unsigned i;
4718 data = RLC_SAFE_MODE__CMD_MASK;
4719 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4721 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);
4723 /* wait for RLC_SAFE_MODE */
4724 for (i = 0; i < adev->usec_timeout; i++) {
4725 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE),
4726 RLC_SAFE_MODE, CMD))
4727 break;
4728 udelay(1);
4729 }
4730 }
4732 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
4733 {
4734 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
4735 }
4737 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
4738 bool enable)
4739 {
4740 uint32_t def, data;
4742 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
4743 return;
4745 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4747 if (enable)
4748 data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
4749 else
4750 data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
4752 if (def != data)
4753 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4754 }
4756 static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev,
4757 bool enable)
4758 {
4759 uint32_t def, data;
4761 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
4762 return;
4764 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4766 if (enable)
4767 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
4768 else
4769 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
4771 if (def != data)
4772 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4773 }
4775 static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev,
4776 bool enable)
4777 {
4778 uint32_t def, data;
4780 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
4781 return;
4783 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4785 if (enable)
4786 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
4787 else
4788 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
4790 if (def != data)
4791 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4792 }
4794 static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4795 bool enable)
4796 {
4797 uint32_t data, def;
4799 if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
4800 return;
4802 /* It is disabled by HW by default */
4803 if (enable) {
4804 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
4805 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4806 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4808 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4809 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4810 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
4812 if (def != data)
4813 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4814 }
4815 } else {
4816 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
4817 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4819 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4820 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4821 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
4823 if (def != data)
4824 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4825 }
4826 }
4827 }
4829 static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4830 bool enable)
4831 {
4832 uint32_t def, data;
4834 if (!(adev->cg_flags &
4835 (AMD_CG_SUPPORT_GFX_CGCG |
4836 AMD_CG_SUPPORT_GFX_CGLS |
4837 AMD_CG_SUPPORT_GFX_3D_CGCG |
4838 AMD_CG_SUPPORT_GFX_3D_CGLS)))
4839 return;
4841 if (enable) {
4842 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4844 /* unset CGCG override */
4845 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
4846 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4847 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4848 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4849 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
4850 adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4851 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4853 /* update CGCG override bits */
4854 if (def != data)
4855 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4857 /* enable cgcg FSM(0x0000363F) */
4858 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
4860 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
4861 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
4862 data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4863 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4864 }
4866 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
4867 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
4868 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4869 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4870 }
4872 if (def != data)
4873 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
4875 /* Program RLC_CGCG_CGLS_CTRL_3D */
4876 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
4878 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
4879 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK;
4880 data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4881 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4882 }
4884 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
4885 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK;
4886 data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4887 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4888 }
4890 if (def != data)
4891 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
4893 /* set IDLE_POLL_COUNT(0x00900100) */
4894 def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL);
4896 data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK);
4897 data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4898 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4900 if (def != data)
4901 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data);
4903 data = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4904 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
4905 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
4906 data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
4907 data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
4908 WREG32_SOC15(GC, 0, regCP_INT_CNTL, data);
4910 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
4911 data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
4912 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
4914 /* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
4915 if (adev->sdma.num_instances > 1) {
4916 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
4917 data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
4918 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
4919 }
4920 } else {
4921 /* Program RLC_CGCG_CGLS_CTRL */
4922 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
4924 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
4925 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4927 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4928 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4930 if (def != data)
4931 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
4933 /* Program RLC_CGCG_CGLS_CTRL_3D */
4934 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
4936 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
4937 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4938 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4939 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4941 if (def != data)
4942 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
4944 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
4945 data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
4946 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
4948 /* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
4949 if (adev->sdma.num_instances > 1) {
4950 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
4951 data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
4952 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
4953 }
4954 }
4955 }
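/*
 * The aggregate entry point below brackets every per-feature gating
 * update with RLC safe mode, so the RLC cannot toggle gating state
 * mid-update.
 */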
4957 static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4958 bool enable)
4959 {
4960 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
4962 gfx_v11_0_update_coarse_grain_clock_gating(adev, enable);
4964 gfx_v11_0_update_medium_grain_clock_gating(adev, enable);
4966 gfx_v11_0_update_repeater_fgcg(adev, enable);
4968 gfx_v11_0_update_sram_fgcg(adev, enable);
4970 gfx_v11_0_update_perf_clk(adev, enable);
4972 if (adev->cg_flags &
4973 (AMD_CG_SUPPORT_GFX_MGCG |
4974 AMD_CG_SUPPORT_GFX_CGLS |
4975 AMD_CG_SUPPORT_GFX_CGCG |
4976 AMD_CG_SUPPORT_GFX_3D_CGCG |
4977 AMD_CG_SUPPORT_GFX_3D_CGLS))
4978 gfx_v11_0_enable_gui_idle_interrupt(adev, enable);
4980 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
4982 return 0;
4983 }
4985 static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
4986 {
4987 u32 reg, data;
4989 amdgpu_gfx_off_ctrl(adev, false);
4991 reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
4992 if (amdgpu_sriov_is_pp_one_vf(adev))
4993 data = RREG32_NO_KIQ(reg);
4994 else
4995 data = RREG32(reg);
4997 data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
4998 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
5000 if (amdgpu_sriov_is_pp_one_vf(adev))
5001 WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
5002 else
5003 WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
5005 amdgpu_gfx_off_ctrl(adev, true);
5006 }
5008 static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
5009 .is_rlc_enabled = gfx_v11_0_is_rlc_enabled,
5010 .set_safe_mode = gfx_v11_0_set_safe_mode,
5011 .unset_safe_mode = gfx_v11_0_unset_safe_mode,
5012 .init = gfx_v11_0_rlc_init,
5013 .get_csb_size = gfx_v11_0_get_csb_size,
5014 .get_csb_buffer = gfx_v11_0_get_csb_buffer,
5015 .resume = gfx_v11_0_rlc_resume,
5016 .stop = gfx_v11_0_rlc_stop,
5017 .reset = gfx_v11_0_rlc_reset,
5018 .start = gfx_v11_0_rlc_start,
5019 .update_spm_vmid = gfx_v11_0_update_spm_vmid,
5020 };
5022 static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
5023 {
5024 u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
5026 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
5027 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5028 else
5029 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5031 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);
5033 /* Program RLC_PG_DELAY_3 for CGPG hysteresis */
5034 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
5035 switch (adev->ip_versions[GC_HWIP][0]) {
5036 case IP_VERSION(11, 0, 1):
5037 case IP_VERSION(11, 0, 4):
5038 WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
5039 break;
5040 default:
5041 break;
5042 }
5043 }
5044 }
5046 static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
5047 {
5048 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5050 gfx_v11_cntl_power_gating(adev, enable);
5052 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5053 }
5055 static int gfx_v11_0_set_powergating_state(void *handle,
5056 enum amd_powergating_state state)
5057 {
5058 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5059 bool enable = (state == AMD_PG_STATE_GATE);
5061 if (amdgpu_sriov_vf(adev))
5062 return 0;
5064 switch (adev->ip_versions[GC_HWIP][0]) {
5065 case IP_VERSION(11, 0, 0):
5066 case IP_VERSION(11, 0, 2):
5067 case IP_VERSION(11, 0, 3):
5068 amdgpu_gfx_off_ctrl(adev, enable);
5069 break;
5070 case IP_VERSION(11, 0, 1):
5071 case IP_VERSION(11, 0, 4):
5072 if (!enable)
5073 amdgpu_gfx_off_ctrl(adev, false);
5075 gfx_v11_cntl_pg(adev, enable);
5077 if (enable)
5078 amdgpu_gfx_off_ctrl(adev, true);
5080 break;
5081 default:
5082 break;
5083 }
5085 return 0;
5086 }
5088 static int gfx_v11_0_set_clockgating_state(void *handle,
5089 enum amd_clockgating_state state)
5090 {
5091 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5093 if (amdgpu_sriov_vf(adev))
5094 return 0;
5096 switch (adev->ip_versions[GC_HWIP][0]) {
5097 case IP_VERSION(11, 0, 0):
5098 case IP_VERSION(11, 0, 1):
5099 case IP_VERSION(11, 0, 2):
5100 case IP_VERSION(11, 0, 3):
5101 case IP_VERSION(11, 0, 4):
5102 gfx_v11_0_update_gfx_clock_gating(adev,
5103 state == AMD_CG_STATE_GATE);
5104 break;
5105 default:
5106 break;
5107 }
5109 return 0;
5110 }
5112 static void gfx_v11_0_get_clockgating_state(void *handle, u64 *flags)
5113 {
5114 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5115 int data;
5117 /* AMD_CG_SUPPORT_GFX_MGCG */
5118 data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5119 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5120 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
5122 /* AMD_CG_SUPPORT_REPEATER_FGCG */
5123 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
5124 *flags |= AMD_CG_SUPPORT_REPEATER_FGCG;
5126 /* AMD_CG_SUPPORT_GFX_FGCG */
5127 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
5128 *flags |= AMD_CG_SUPPORT_GFX_FGCG;
5130 /* AMD_CG_SUPPORT_GFX_PERF_CLK */
5131 if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
5132 *flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;
5134 /* AMD_CG_SUPPORT_GFX_CGCG */
5135 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5136 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5137 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
5139 /* AMD_CG_SUPPORT_GFX_CGLS */
5140 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5141 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
5143 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
5144 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5145 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5146 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5148 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
5149 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5150 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5151 }
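/*
 * Ring pointer helpers: gfx11 read pointers are 32-bit values in the
 * writeback buffer, while write pointers are 64-bit and normally
 * published through a doorbell rather than CP_RB0_WPTR MMIO writes.
 */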
5153 static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5154 {
5155 /* gfx11 is 32-bit rptr */
5156 return *(uint32_t *)ring->rptr_cpu_addr;
5157 }
5159 static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5160 {
5161 struct amdgpu_device *adev = ring->adev;
5162 u64 wptr;
5164 /* XXX check if swapping is necessary on BE */
5165 if (ring->use_doorbell) {
5166 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5167 } else {
5168 wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR);
5169 wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32;
5170 }
5172 return wptr;
5173 }
5175 static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5176 {
5177 struct amdgpu_device *adev = ring->adev;
5178 uint32_t *wptr_saved;
5179 uint32_t *is_queue_unmap;
5180 uint64_t aggregated_db_index;
5181 uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size;
5182 uint64_t wptr_tmp;
5184 if (ring->is_mes_queue) {
5185 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
5186 is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
5187 sizeof(uint32_t));
5188 aggregated_db_index =
5189 amdgpu_mes_get_aggregated_doorbell_index(adev,
5190 ring->hw_prio);
5192 wptr_tmp = ring->wptr & ring->buf_mask;
5193 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
5194 *wptr_saved = wptr_tmp;
5195 /* assume doorbell always being used by mes mapped queue */
5196 if (*is_queue_unmap) {
5197 WDOORBELL64(aggregated_db_index, wptr_tmp);
5198 WDOORBELL64(ring->doorbell_index, wptr_tmp);
5199 } else {
5200 WDOORBELL64(ring->doorbell_index, wptr_tmp);
5202 if (*is_queue_unmap)
5203 WDOORBELL64(aggregated_db_index, wptr_tmp);
5204 }
5205 } else {
5206 if (ring->use_doorbell) {
5207 /* XXX check if swapping is necessary on BE */
5208 atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
5209 ring->wptr);
5210 WDOORBELL64(ring->doorbell_index, ring->wptr);
5211 } else {
5212 WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
5213 lower_32_bits(ring->wptr));
5214 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
5215 upper_32_bits(ring->wptr));
5216 }
5217 }
5218 }
5220 static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5221 {
5222 /* gfx11 hardware is 32-bit rptr */
5223 return *(uint32_t *)ring->rptr_cpu_addr;
5224 }
5226 static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5227 {
5228 u64 wptr;
5230 /* XXX check if swapping is necessary on BE */
5231 if (ring->use_doorbell)
5232 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5233 else
5234 BUG();
5235 return wptr;
5236 }
5238 static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5239 {
5240 struct amdgpu_device *adev = ring->adev;
5241 uint32_t *wptr_saved;
5242 uint32_t *is_queue_unmap;
5243 uint64_t aggregated_db_index;
5244 uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size;
5245 uint64_t wptr_tmp;
5247 if (ring->is_mes_queue) {
5248 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
5249 is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
5251 aggregated_db_index =
5252 amdgpu_mes_get_aggregated_doorbell_index(adev,
5255 wptr_tmp = ring->wptr & ring->buf_mask;
5256 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
5257 *wptr_saved = wptr_tmp;
5258 /* assume doorbell always used by mes mapped queue */
5259 if (*is_queue_unmap) {
5260 WDOORBELL64(aggregated_db_index, wptr_tmp);
5261 WDOORBELL64(ring->doorbell_index, wptr_tmp);
5262 } else {
5263 WDOORBELL64(ring->doorbell_index, wptr_tmp);
5265 if (*is_queue_unmap)
5266 WDOORBELL64(aggregated_db_index, wptr_tmp);
5267 }
5268 } else {
5269 /* XXX check if swapping is necessary on BE */
5270 if (ring->use_doorbell) {
5271 atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
5272 ring->wptr);
5273 WDOORBELL64(ring->doorbell_index, ring->wptr);
5274 } else {
5275 BUG(); /* only DOORBELL method supported on gfx11 now */
5276 }
5277 }
5278 }
5280 static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5281 {
5282 struct amdgpu_device *adev = ring->adev;
5283 u32 ref_and_mask, reg_mem_engine;
5284 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
5286 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
5287 switch (ring->me) {
5288 case 1:
5289 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
5290 break;
5291 case 2:
5292 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
5293 break;
5294 default:
5295 return;
5296 }
5297 reg_mem_engine = 0;
5298 } else {
5299 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
5300 reg_mem_engine = 1; /* pfp */
5301 }
5303 gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5304 adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5305 adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5306 ref_and_mask, ref_and_mask, 0x20);
5307 }
5309 static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5310 struct amdgpu_job *job,
5311 struct amdgpu_ib *ib,
5312 uint32_t flags)
5313 {
5314 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5315 u32 header, control = 0;
5317 BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
5319 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5321 control |= ib->length_dw | (vmid << 24);
5323 if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5324 control |= INDIRECT_BUFFER_PRE_ENB(1);
5326 if (flags & AMDGPU_IB_PREEMPTED)
5327 control |= INDIRECT_BUFFER_PRE_RESUME(1);
5329 if (vmid)
5330 gfx_v11_0_ring_emit_de_meta(ring,
5331 (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
5332 }
5334 if (ring->is_mes_queue)
5335 /* inherit vmid from mqd */
5336 control |= 0x400000;
5338 amdgpu_ring_write(ring, header);
5339 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5340 amdgpu_ring_write(ring,
5341 #ifdef __BIG_ENDIAN
5342 (2 << 0) |
5343 #endif
5344 lower_32_bits(ib->gpu_addr));
5345 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5346 amdgpu_ring_write(ring, control);
5347 }
5349 static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5350 struct amdgpu_job *job,
5351 struct amdgpu_ib *ib,
5352 uint32_t flags)
5353 {
5354 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5355 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5357 if (ring->is_mes_queue)
5358 /* inherit vmid from mqd */
5359 control |= 0x40000000;
5361 /* Currently, there is a high possibility to get wave ID mismatch
5362 * between ME and GDS, leading to a hw deadlock, because ME generates
5363 * different wave IDs than the GDS expects. This situation happens
5364 * randomly when at least 5 compute pipes use GDS ordered append.
5365 * The wave IDs generated by ME are also wrong after suspend/resume.
5366 * Those are probably bugs somewhere else in the kernel driver.
5368 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5369 * GDS to 0 for this ring (me/pipe).
5370 */
5371 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5372 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5373 amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
5374 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5375 }
5377 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5378 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5379 amdgpu_ring_write(ring,
5380 #ifdef __BIG_ENDIAN
5381 (2 << 0) |
5382 #endif
5383 lower_32_bits(ib->gpu_addr));
5384 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5385 amdgpu_ring_write(ring, control);
5386 }
5388 static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5389 u64 seq, unsigned flags)
5390 {
5391 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5392 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5394 /* RELEASE_MEM - flush caches, send int */
5395 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5396 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
5397 PACKET3_RELEASE_MEM_GCR_GL2_WB |
5398 PACKET3_RELEASE_MEM_GCR_GL2_INV |
5399 PACKET3_RELEASE_MEM_GCR_GL2_US |
5400 PACKET3_RELEASE_MEM_GCR_GL1_INV |
5401 PACKET3_RELEASE_MEM_GCR_GLV_INV |
5402 PACKET3_RELEASE_MEM_GCR_GLM_INV |
5403 PACKET3_RELEASE_MEM_GCR_GLM_WB |
5404 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
5405 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5406 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
5407 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
5408 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
5410 /*
5411 * the address should be Qword aligned if 64bit write, Dword
5412 * aligned if only send 32bit data low (discard data high)
5413 */
5414 if (write64bit)
5415 BUG_ON(addr & 0x7);
5416 else
5417 BUG_ON(addr & 0x3);
5418 amdgpu_ring_write(ring, lower_32_bits(addr));
5419 amdgpu_ring_write(ring, upper_32_bits(addr));
5420 amdgpu_ring_write(ring, lower_32_bits(seq));
5421 amdgpu_ring_write(ring, upper_32_bits(seq));
5422 amdgpu_ring_write(ring, ring->is_mes_queue ?
5423 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
5424 }
5426 static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5427 {
5428 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5429 uint32_t seq = ring->fence_drv.sync_seq;
5430 uint64_t addr = ring->fence_drv.gpu_addr;
5432 gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
5433 upper_32_bits(addr), seq, 0xffffffff, 4);
5434 }
5436 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
5437 uint16_t pasid, uint32_t flush_type,
5438 bool all_hub, uint8_t dst_sel)
5439 {
5440 amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
5441 amdgpu_ring_write(ring,
5442 PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
5443 PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
5444 PACKET3_INVALIDATE_TLBS_PASID(pasid) |
5445 PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
5446 }
5448 static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5449 unsigned vmid, uint64_t pd_addr)
5450 {
5451 if (ring->is_mes_queue)
5452 gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
5453 else
5454 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5456 /* compute doesn't have PFP */
5457 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5458 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5459 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5460 amdgpu_ring_write(ring, 0x0);
5461 }
5462 }
5464 static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5465 u64 seq, unsigned int flags)
5466 {
5467 struct amdgpu_device *adev = ring->adev;
5469 /* we only allocate 32bit for each seq wb address */
5470 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5472 /* write fence seq to the "addr" */
5473 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5474 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5475 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5476 amdgpu_ring_write(ring, lower_32_bits(addr));
5477 amdgpu_ring_write(ring, upper_32_bits(addr));
5478 amdgpu_ring_write(ring, lower_32_bits(seq));
5480 if (flags & AMDGPU_FENCE_FLAG_INT) {
5481 /* set register to trigger INT */
5482 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5483 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5484 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5485 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS));
5486 amdgpu_ring_write(ring, 0);
5487 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5488 }
5489 }
5491 static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
5492 uint32_t flags)
5493 {
5494 uint32_t dw2 = 0;
5496 dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
5497 if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5498 /* set load_global_config & load_global_uconfig */
5499 dw2 |= 0x8001;
5500 /* set load_cs_sh_regs */
5501 dw2 |= 0x01000000;
5502 /* set load_per_context_state & load_gfx_sh_regs for GFX */
5503 dw2 |= 0x10002;
5504 }
5506 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5507 amdgpu_ring_write(ring, dw2);
5508 amdgpu_ring_write(ring, 0);
5509 }
5511 static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring,
5512 u64 shadow_va, u64 csa_va,
5513 u64 gds_va, bool init_shadow,
5514 int vmid)
5515 {
5516 struct amdgpu_device *adev = ring->adev;
5518 if (!adev->gfx.cp_gfx_shadow)
5519 return;
5521 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_Q_PREEMPTION_MODE, 7));
5522 amdgpu_ring_write(ring, lower_32_bits(shadow_va));
5523 amdgpu_ring_write(ring, upper_32_bits(shadow_va));
5524 amdgpu_ring_write(ring, lower_32_bits(gds_va));
5525 amdgpu_ring_write(ring, upper_32_bits(gds_va));
5526 amdgpu_ring_write(ring, lower_32_bits(csa_va));
5527 amdgpu_ring_write(ring, upper_32_bits(csa_va));
5528 amdgpu_ring_write(ring, shadow_va ?
5529 PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(vmid) : 0);
5530 amdgpu_ring_write(ring, init_shadow ?
5531 PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM : 0);
5532 }
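/*
 * COND_EXEC works as a two-step patch: init_cond_exec emits the packet
 * with a dummy dword count (0x55aa55aa) and remembers its ring offset;
 * patch_cond_exec later rewrites that dword with the real distance from
 * the packet to the current wptr, accounting for ring wrap-around.
 */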
5534 static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
5535 {
5536 unsigned ret;
5538 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5539 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
5540 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
5541 amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
5542 ret = ring->wptr & ring->buf_mask;
5543 amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
5545 return ret;
5546 }
5548 static void gfx_v11_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
5549 {
5550 unsigned cur;
5551 BUG_ON(offset > ring->buf_mask);
5552 BUG_ON(ring->ring[offset] != 0x55aa55aa);
5554 cur = (ring->wptr - 1) & ring->buf_mask;
5555 if (likely(cur > offset))
5556 ring->ring[offset] = cur - offset;
5557 else
5558 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
5559 }
5561 static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
5562 {
5563 int i, r = 0;
5564 struct amdgpu_device *adev = ring->adev;
5565 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
5566 struct amdgpu_ring *kiq_ring = &kiq->ring;
5567 unsigned long flags;
5569 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
5570 return -EINVAL;
5572 spin_lock_irqsave(&kiq->ring_lock, flags);
5574 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
5575 spin_unlock_irqrestore(&kiq->ring_lock, flags);
5576 return -ENOMEM;
5577 }
5579 /* assert preemption condition */
5580 amdgpu_ring_set_preempt_cond_exec(ring, false);
5582 /* assert IB preemption, emit the trailing fence */
5583 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
5584 ring->trail_fence_gpu_addr,
5585 ++ring->trail_seq);
5586 amdgpu_ring_commit(kiq_ring);
5588 spin_unlock_irqrestore(&kiq->ring_lock, flags);
5590 /* poll the trailing fence */
5591 for (i = 0; i < adev->usec_timeout; i++) {
5592 if (ring->trail_seq ==
5593 le32_to_cpu(*(ring->trail_fence_cpu_addr)))
5594 break;
5595 udelay(1);
5596 }
5598 if (i >= adev->usec_timeout) {
5599 r = -EINVAL;
5600 DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
5601 }
5603 /* deassert preemption condition */
5604 amdgpu_ring_set_preempt_cond_exec(ring, true);
5606 return r;
5607 }
5608 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
5609 {
5610 struct amdgpu_device *adev = ring->adev;
5611 struct v10_de_ib_state de_payload = {0};
5612 uint64_t offset, gds_addr, de_payload_gpu_addr;
5613 void *de_payload_cpu_addr;
5614 int cnt;
5616 if (ring->is_mes_queue) {
5617 offset = offsetof(struct amdgpu_mes_ctx_meta_data,
5618 gfx[0].gfx_meta_data) +
5619 offsetof(struct v10_gfx_meta_data, de_payload);
5620 de_payload_gpu_addr =
5621 amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
5622 de_payload_cpu_addr =
5623 amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
5625 offset = offsetof(struct amdgpu_mes_ctx_meta_data,
5626 gfx[0].gds_backup) +
5627 offsetof(struct v10_gfx_meta_data, de_payload);
5628 gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
5630 offset = offsetof(struct v10_gfx_meta_data, de_payload);
5631 de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
5632 de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
5634 gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
5635 AMDGPU_CSA_SIZE - adev->gds.gds_size,
5639 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5640 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5642 cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5643 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5644 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5645 WRITE_DATA_DST_SEL(8) |
5647 WRITE_DATA_CACHE_POLICY(0));
5648 amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
5649 amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
5652 amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
5653 sizeof(de_payload) >> 2);
5655 amdgpu_ring_write_multiple(ring, (void *)&de_payload,
5656 sizeof(de_payload) >> 2);
static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
					   bool secure)
{
	uint32_t v = secure ? FRAME_TMZ : 0;

	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
}

static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t reg_val_offs)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register*/
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
}

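/*
 * Register writes from a ring go through WRITE_DATA. The gfx ring
 * selects the ME engine with write confirmation; the KIQ uses the
 * "no increment address" encoding instead.
 */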
static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val)
{
	uint32_t cmd = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = (1 << 16); /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{
	gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}

static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						   uint32_t reg0, uint32_t reg1,
						   uint32_t ref, uint32_t mask)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
			       ref, mask, 0x20);
}

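/*
 * Soft recovery: program SQ_CMD to kill the waves belonging to the hung
 * VMID (CHECK_VMID is set so only that VMID is affected), which is much
 * cheaper than a full GPU reset.
 */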
static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
					 unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t value = 0;

	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
	WREG32_SOC15(GC, 0, regSQ_CMD, value);
}

static void
gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
				      uint32_t me, uint32_t pipe,
				      enum amdgpu_interrupt_state state)
{
	uint32_t cp_int_cntl, cp_int_cntl_reg;

	if (me == 0) {
		switch (pipe) {
		case 0:
			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
			break;
		case 1:
			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    TIME_STAMP_INT_ENABLE, 0);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    GENERIC0_INT_ENABLE, 0);
		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    TIME_STAMP_INT_ENABLE, 1);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    GENERIC0_INT_ENABLE, 1);
		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
		break;
	default:
		break;
	}
}

static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						      int me, int pipe,
						      enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */
	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
			break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
			break;
		case 2:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
			break;
		case 3:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     GENERIC0_INT_ENABLE, 0);
		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     GENERIC0_INT_ENABLE, 1);
		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}

static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
		break;
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

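/*
 * EOP (end-of-pipe) interrupt handler. For MES-managed queues the queue
 * id travels in src_data[0]; for legacy queues the target ring is
 * identified by the fields packed into entry->ring_id (me in bits 3:2,
 * pipe in bits 1:0, queue in bits 6:4).
 */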
static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	uint32_t mes_queue_id = entry->src_data[0];

	DRM_DEBUG("IH: CP EOP\n");

	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
		struct amdgpu_mes_queue *queue;

		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;

		spin_lock(&adev->mes.queue_id_lock);
		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
		if (queue) {
			DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
			amdgpu_fence_process(queue->ring);
		}
		spin_unlock(&adev->mes.queue_id_lock);
	} else {
		me_id = (entry->ring_id & 0x0c) >> 2;
		pipe_id = (entry->ring_id & 0x03) >> 0;
		queue_id = (entry->ring_id & 0x70) >> 4;

		switch (me_id) {
		case 0:
			if (pipe_id == 0)
				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
			else
				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
			break;
		case 1:
		case 2:
			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
				ring = &adev->gfx.compute_ring[i];
				/* Per-queue interrupt is supported for MEC starting from VI.
				 * The interrupt can only be enabled/disabled per pipe instead
				 * of per queue.
				 */
				if ((ring->me == me_id) &&
				    (ring->pipe == pipe_id) &&
				    (ring->queue == queue_id))
					amdgpu_fence_process(ring);
			}
			break;
		}
	}

	return 0;
}

static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
				      PRIV_REG_INT_ENABLE,
				      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned type,
					       enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
				      PRIV_INSTR_INT_ENABLE,
				      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
					struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			/* we only enabled 1 gfx queue per pipe for now */
			if (ring->me == me_id && ring->pipe == pipe_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	default:
		BUG();
		break;
	}
}

static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v11_0_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v11_0_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq)
		return adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry);

	return -EINVAL;
}

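/*
 * The KIQ signals the driver through GENERIC2 interrupts only, so both
 * the global CPC_INT_CNTL and the per-pipe CP_ME1_PIPEx_INT_CNTL copies
 * of GENERIC2_INT_ENABLE are toggled together.
 */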
static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	uint32_t tmp, target;
	struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);

	target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
	target += ring->pipe;

	switch (type) {
	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
		if (state == AMDGPU_IRQ_STATE_DISABLE) {
			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);

			tmp = RREG32_SOC15_IP(GC, target);
			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15_IP(GC, target, tmp);
		} else {
			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);

			tmp = RREG32_SOC15_IP(GC, target);
			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15_IP(GC, target, tmp);
		}
		break;
	default:
		BUG(); /* kiq only support GENERIC2_INT now */
		break;
	}

	return 0;
}

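/*
 * Emit a full ACQUIRE_MEM sync: the GCR_CNTL value below invalidates
 * (and where applicable writes back) every cache level the GCR can
 * target (GL2, GLM, GL1, GLV, GLK, GLI) across the whole address range
 * (CP_COHER_SIZE all ones).
 */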
static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
{
	const unsigned int gcr_cntl =
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);

	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}

static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
	.name = "gfx_v11_0",
	.early_init = gfx_v11_0_early_init,
	.late_init = gfx_v11_0_late_init,
	.sw_init = gfx_v11_0_sw_init,
	.sw_fini = gfx_v11_0_sw_fini,
	.hw_init = gfx_v11_0_hw_init,
	.hw_fini = gfx_v11_0_hw_fini,
	.suspend = gfx_v11_0_suspend,
	.resume = gfx_v11_0_resume,
	.is_idle = gfx_v11_0_is_idle,
	.wait_for_idle = gfx_v11_0_wait_for_idle,
	.soft_reset = gfx_v11_0_soft_reset,
	.check_soft_reset = gfx_v11_0_check_soft_reset,
	.post_soft_reset = gfx_v11_0_post_soft_reset,
	.set_clockgating_state = gfx_v11_0_set_clockgating_state,
	.set_powergating_state = gfx_v11_0_set_powergating_state,
	.get_clockgating_state = gfx_v11_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v11_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 242 maximum if 16 IBs */
		5 + /* COND_EXEC */
		9 + /* SET_Q_PREEMPTION_MODE */
		7 + /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* VM_FLUSH */
		8 + /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		5 + /* COND_EXEC */
		7 + /* HDP_flush */
		4 + /* VGT_flush */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		8, /* gfx_v11_0_emit_mem_sync */
	.emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v11_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v11_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
	.emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow,
	.init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v11_0_ring_emit_patch_cond_exec,
	.preempt_ib = gfx_v11_0_ring_preempt_ib,
	.emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v11_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
};

static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v11_0_ring_emit_gds_switch */
		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v11_0_ring_emit_vm_flush */
		8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
		8, /* gfx_v11_0_emit_mem_sync */
	.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
	.emit_fence = gfx_v11_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
};

static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v11_0_ring_emit_gds_switch */
		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v11_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
	.emit_fence = gfx_v11_0_ring_emit_fence_kiq,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v11_0_ring_emit_rreg,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
};

static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = {
	.set = gfx_v11_0_set_eop_interrupt_state,
	.process = gfx_v11_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = {
	.set = gfx_v11_0_set_priv_reg_fault_state,
	.process = gfx_v11_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
	.set = gfx_v11_0_set_priv_inst_fault_state,
	.process = gfx_v11_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
	.process = gfx_v11_0_rlc_gc_fed_irq,
};

static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;

	adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
	adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;
}

static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		adev->gfx.imu.mode = MISSION_MODE;
	else
		adev->gfx.imu.mode = DEBUG_MODE;

	adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs;
}

static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs;
}

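/*
 * GDS defaults for gfx11: a 4KB global data share, with the maximum
 * compute wave id derived from the total CU count (32 waves per CU).
 */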
static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev)
{
	unsigned total_cu = adev->gfx.config.max_cu_per_sh *
		adev->gfx.config.max_sh_per_se *
		adev->gfx.config.max_shader_engines;

	adev->gds.gds_size = 0x1000;
	adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}

static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev)
{
	/* set gfx eng mqd */
	adev->mqds[AMDGPU_HW_IP_GFX].mqd_size =
		sizeof(struct v11_gfx_mqd);
	adev->mqds[AMDGPU_HW_IP_GFX].init_mqd =
		gfx_v11_0_gfx_mqd_init;
	/* set compute eng mqd */
	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
		sizeof(struct v11_compute_mqd);
	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
		gfx_v11_0_compute_mqd_init;
}

static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
							  u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;

	WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
}

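/*
 * WGP (workgroup processor) harvesting: the active bitmap is the
 * complement of the INACTIVE_WGPS fuses OR'ed with the user-requested
 * disables, clipped to max_cu_per_sh / 2 WGPs per shader array.
 */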
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 data, wgp_bitmask;
	data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;

	wgp_bitmask =
		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);

	return (~data) & wgp_bitmask;
}

static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 wgp_idx, wgp_active_bitmap;
	u32 cu_bitmap_per_wgp, cu_active_bitmap;

	wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev);
	cu_active_bitmap = 0;

	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
		/* if there is one WGP enabled, it means 2 CUs will be enabled */
		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
		if (wgp_active_bitmap & (1 << wgp_idx))
			cu_active_bitmap |= cu_bitmap_per_wgp;
	}

	return cu_active_bitmap;
}

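/*
 * Walk every SE/SH, apply the user CU-disable masks, and accumulate the
 * per-shader-array CU bitmaps plus the total active CU count into
 * cu_info for the INFO ioctl.
 */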
static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap;
	unsigned disable_masks[8 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			counter = 0;
			gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			if (i < 8 && j < 2)
				gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev);

			/**
			 * GFX11 could support more than 4 SEs, while the bitmap
			 * in cu_info struct is 4x4 and ioctl interface struct
			 * drm_amdgpu_info_device should keep stable.
			 * So we use last two columns of bitmap to store cu mask for
			 * SEs 4 to 7, the layout of the bitmap is as below:
			 * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
			 * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
			 * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
			 * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
			 * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
			 * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
			 * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
			 * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
			 */
			cu_info->bitmap[i % 4][j + (i / 4) * 2] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask)
					counter++;

				mask <<= 1;
			}
			active_cu_number += counter;
		}
	}
	gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v11_0_ip_funcs,
};