/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "nv.h"
#include "nvd.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "navi10_enum.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"

#include "soc15.h"
#include "soc15_common.h"
#include "clearstate_gfx10.h"
#include "v10_structs.h"
#include "gfx_v10_0.h"
#include "nbio_v2_3.h"
/*
 * Navi10 has two graphic rings to share each graphic pipe.
 * 1. Primary ring
 * 2. Async ring
 *
 * In bring-up phase, it just used primary ring so set gfx ring number as 1 at
 * first.
 */
#define GFX10_NUM_GFX_RINGS			2
#define GFX10_MEC_HPD_SIZE			2048

#define F32_CE_PROGRAM_RAM_SIZE			65536
#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L

#define mmCGTT_GS_NGG_CLK_CTRL			0x5087
#define mmCGTT_GS_NGG_CLK_CTRL_BASE_IDX		1
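/*
 * mmCGTT_GS_NGG_CLK_CTRL is defined locally above, presumably because the
 * offset had not yet landed in the generated gc_10_1_0 register headers;
 * BASE_IDX 1 selects the same GC aperture the other CGTT_*_CLK_CTRL
 * registers live in.
 */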
MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
MODULE_FIRMWARE("amdgpu/navi10_mec.bin");
MODULE_FIRMWARE("amdgpu/navi10_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi10_rlc.bin");

MODULE_FIRMWARE("amdgpu/navi14_ce.bin");
MODULE_FIRMWARE("amdgpu/navi14_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi14_me.bin");
MODULE_FIRMWARE("amdgpu/navi14_mec.bin");
MODULE_FIRMWARE("amdgpu/navi14_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi14_rlc.bin");

MODULE_FIRMWARE("amdgpu/navi12_ce.bin");
MODULE_FIRMWARE("amdgpu/navi12_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi12_me.bin");
MODULE_FIRMWARE("amdgpu/navi12_mec.bin");
MODULE_FIRMWARE("amdgpu/navi12_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi12_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_10_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xfeff8fff, 0xfeff8100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000002, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x000007ff, 0x000005ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0x20000000, 0x20000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000200, 0x00000200),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07900000, 0x04900000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x10321032),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x02310231),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffff9fff, 0x00001188),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CGTT_CLK_CTRL, 0xfeff0fff, 0x40000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000)
};
static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
{
        /* Pending on emulation bring up */
};
static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xffff8fff, 0xffff8100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000002, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000200, 0x00000200),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000),
};
static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0xc0000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xffffcfff, 0x60000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0xffff0fff, 0x40000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xffff8fff, 0xffff8100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000003, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x10321032),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x02310231),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_0, 0xffffffff, 0x842a4c02),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04440000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
};
static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
{
        /* Pending on emulation bring up */
};

static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
{
        /* Pending on emulation bring up */
};
#define DEFAULT_SH_MEM_CONFIG \
        ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
         (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
         (SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
         (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
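/*
 * DEFAULT_SH_MEM_CONFIG composes the SH_MEM_CONFIG value programmed for every
 * VMID below: 64-bit address mode, unaligned access mode, RETRY_MODE_ALL, and
 * an initial instruction prefetch depth of 3.
 */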
static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
                                 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
                                   u32 sh_num, u32 instance);
static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev);
static void gfx_v10_0_rlc_backdoor_autoload_buffer_fini(struct amdgpu_device *adev);
static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev);
static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);
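/*
 * KIQ (Kernel Interface Queue) PM4 packet builders. The KIQ is a privileged
 * compute queue through which the driver asks the CP scheduler firmware to
 * map, unmap and query the state of the other gfx/compute queues.
 */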
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
        amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
                          PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
        amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
        amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
        amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
        amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
        amdgpu_ring_write(kiq_ring, 0); /* oac mask */
        amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}
static void gfx10_kiq_map_queues(struct amdgpu_ring *kiq_ring,
                                 struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = kiq_ring->adev;
        uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
        uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
        amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
                          PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
                          PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
                          PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
                          PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
                          PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
                          PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
                          PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
                          PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
                          PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
        amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
        amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
        amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
static void gfx10_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
                                   struct amdgpu_ring *ring,
                                   enum amdgpu_unmap_queues_action action,
                                   u64 gpu_addr, u64 seq)
{
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
        amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
                          PACKET3_UNMAP_QUEUES_ACTION(action) |
                          PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
                          PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
                          PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
        amdgpu_ring_write(kiq_ring,
                          PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

        if (action == PREEMPT_QUEUES_NO_UNMAP) {
                amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
                amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
                amdgpu_ring_write(kiq_ring, seq);
        } else {
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);
        }
}
static void gfx10_kiq_query_status(struct amdgpu_ring *kiq_ring,
                                   struct amdgpu_ring *ring,
                                   u64 addr,
                                   u64 seq)
{
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
        amdgpu_ring_write(kiq_ring,
                          PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
                          PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
                          PACKET3_QUERY_STATUS_COMMAND(2));
        amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
                          PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
                          PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
        amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
        amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
        amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}
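/*
 * The *_size fields below are packet lengths in dwords and must match the
 * builders above; the shared KIQ code uses them to reserve ring space before
 * emitting the corresponding packets.
 */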
static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
        .kiq_set_resources = gfx10_kiq_set_resources,
        .kiq_map_queues = gfx10_kiq_map_queues,
        .kiq_unmap_queues = gfx10_kiq_unmap_queues,
        .kiq_query_status = gfx10_kiq_query_status,
        .set_resources_size = 8,
        .map_queues_size = 7,
        .unmap_queues_size = 6,
        .query_status_size = 7,
};
static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
        adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
}
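/*
 * A minimal sketch of how these hooks are consumed by the shared KIQ code
 * (simplified from amdgpu_gfx.c; locking and error handling omitted):
 *
 *      struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 *
 *      amdgpu_ring_alloc(&kiq->ring, kiq->pmf->map_queues_size);
 *      kiq->pmf->kiq_map_queues(&kiq->ring, ring);
 *      amdgpu_ring_commit(&kiq->ring);
 */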
static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_NAVI10:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_10_1,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_10_1));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_10_0_nv10,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
                break;
        case CHIP_NAVI14:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_10_1_1,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_10_1_1));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_10_1_nv14,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
                break;
        case CHIP_NAVI12:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_10_1_2,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_10_1_2));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_10_1_2_nv12,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
                break;
        default:
                break;
        }
}
static void gfx_v10_0_scratch_init(struct amdgpu_device *adev)
{
        adev->gfx.scratch.num_reg = 8;
        adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
        adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
static void gfx_v10_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
                                        bool wc, uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
                          WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, val);
}
static void gfx_v10_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
                                   int mem_space, int opt, uint32_t addr0,
                                   uint32_t addr1, uint32_t ref, uint32_t mask,
                                   uint32_t inv)
{
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
        amdgpu_ring_write(ring,
                          /* memory (1) or register (0) */
                          (WAIT_REG_MEM_MEM_SPACE(mem_space) |
                           WAIT_REG_MEM_OPERATION(opt) | /* wait */
                           WAIT_REG_MEM_FUNCTION(3) | /* equal */
                           WAIT_REG_MEM_ENGINE(eng_sel)));

        if (mem_space)
                BUG_ON(addr0 & 0x3); /* Dword align */
        amdgpu_ring_write(ring, addr0);
        amdgpu_ring_write(ring, addr1);
        amdgpu_ring_write(ring, ref);
        amdgpu_ring_write(ring, mask);
        amdgpu_ring_write(ring, inv); /* poll interval */
}
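/*
 * Basic ring liveness test: seed a scratch register with 0xCAFEDEAD, submit
 * a SET_UCONFIG_REG packet that rewrites it to 0xDEADBEEF, then poll until
 * the CP has consumed the packet or the timeout expires.
 */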
static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t scratch;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        r = amdgpu_gfx_scratch_get(adev, &scratch);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
                return r;
        }

        WREG32(scratch, 0xCAFEDEAD);

        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                amdgpu_gfx_scratch_free(adev, scratch);
                return r;
        }

        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
        amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
                        break;
                if (amdgpu_emu_mode == 1)
                        msleep(1);
                else
                        udelay(1);
        }
        if (i < adev->usec_timeout) {
                if (amdgpu_emu_mode == 1)
                        DRM_INFO("ring test on %d succeeded in %d msecs\n",
                                 ring->idx, i);
                else
                        DRM_INFO("ring test on %d succeeded in %d usecs\n",
                                 ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
                          ring->idx, scratch, tmp);
                r = -EINVAL;
        }
        amdgpu_gfx_scratch_free(adev, scratch);

        return r;
}
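/*
 * Same idea as the ring test above, but the scratch write goes through an
 * indirect buffer so the whole IB submission path (scheduler plus fences)
 * is exercised.
 */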
static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;
        uint32_t scratch;
        uint32_t tmp = 0;
        long r;

        r = amdgpu_gfx_scratch_get(adev, &scratch);
        if (r) {
                DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
                return r;
        }

        WREG32(scratch, 0xCAFEDEAD);

        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                goto err1;
        }

        ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
        ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
        ib.ptr[2] = 0xDEADBEEF;
        ib.length_dw = 3;

        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err2;

        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
                DRM_ERROR("amdgpu: IB test timed out.\n");
                r = -ETIMEDOUT;
                goto err2;
        } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err2;
        }

        tmp = RREG32(scratch);
        if (tmp == 0xDEADBEEF) {
                DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
                r = 0;
        } else {
                DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
                          scratch, tmp);
                r = -EINVAL;
        }
err2:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
err1:
        amdgpu_gfx_scratch_free(adev, scratch);

        return r;
}
static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
{
        release_firmware(adev->gfx.pfp_fw);
        adev->gfx.pfp_fw = NULL;
        release_firmware(adev->gfx.me_fw);
        adev->gfx.me_fw = NULL;
        release_firmware(adev->gfx.ce_fw);
        adev->gfx.ce_fw = NULL;
        release_firmware(adev->gfx.rlc_fw);
        adev->gfx.rlc_fw = NULL;
        release_firmware(adev->gfx.mec_fw);
        adev->gfx.mec_fw = NULL;
        release_firmware(adev->gfx.mec2_fw);
        adev->gfx.mec2_fw = NULL;

        kfree(adev->gfx.rlc.register_list_format);
}
static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
        const struct rlc_firmware_header_v2_1 *rlc_hdr;

        rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
        adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
        adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
        adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
        adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
        adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
        adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
        adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
        adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
        adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
        adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
        adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
        adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
        adev->gfx.rlc.reg_list_format_direct_reg_list_length =
                le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}
static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_NAVI10:
                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
                break;
        default:
                break;
        }
}
static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[40];
        int err;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
        const struct gfx_firmware_header_v1_0 *cp_hdr;
        const struct rlc_firmware_header_v2_0 *rlc_hdr;
        unsigned int *tmp = NULL;
        unsigned int i = 0;
        uint16_t version_major;
        uint16_t version_minor;

        DRM_DEBUG("\n");

        switch (adev->asic_type) {
        case CHIP_NAVI10:
                chip_name = "navi10";
                break;
        case CHIP_NAVI14:
                chip_name = "navi14";
                break;
        case CHIP_NAVI12:
                chip_name = "navi12";
                break;
        default:
                BUG();
        }
624 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
625 err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
628 err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
631 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
632 adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
633 adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
635 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
636 err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
639 err = amdgpu_ucode_validate(adev->gfx.me_fw);
642 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
643 adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
644 adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
646 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
647 err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
650 err = amdgpu_ucode_validate(adev->gfx.ce_fw);
653 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
654 adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
655 adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
657 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
658 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
661 err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
662 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
663 version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
664 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
665 if (version_major == 2 && version_minor == 1)
666 adev->gfx.rlc.is_rlc_v2_1 = true;
668 adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
669 adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
670 adev->gfx.rlc.save_and_restore_offset =
671 le32_to_cpu(rlc_hdr->save_and_restore_offset);
672 adev->gfx.rlc.clear_state_descriptor_offset =
673 le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
674 adev->gfx.rlc.avail_scratch_ram_locations =
675 le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
676 adev->gfx.rlc.reg_restore_list_size =
677 le32_to_cpu(rlc_hdr->reg_restore_list_size);
678 adev->gfx.rlc.reg_list_format_start =
679 le32_to_cpu(rlc_hdr->reg_list_format_start);
680 adev->gfx.rlc.reg_list_format_separate_start =
681 le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
682 adev->gfx.rlc.starting_offsets_start =
683 le32_to_cpu(rlc_hdr->starting_offsets_start);
684 adev->gfx.rlc.reg_list_format_size_bytes =
685 le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
686 adev->gfx.rlc.reg_list_size_bytes =
687 le32_to_cpu(rlc_hdr->reg_list_size_bytes);
688 adev->gfx.rlc.register_list_format =
689 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
690 adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
691 if (!adev->gfx.rlc.register_list_format) {
696 tmp = (unsigned int *)((uintptr_t)rlc_hdr +
697 le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
698 for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
699 adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
701 adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
703 tmp = (unsigned int *)((uintptr_t)rlc_hdr +
704 le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
705 for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
706 adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
708 if (adev->gfx.rlc.is_rlc_v2_1)
709 gfx_v10_0_init_rlc_ext_microcode(adev);
711 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
712 err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
715 err = amdgpu_ucode_validate(adev->gfx.mec_fw);
718 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
719 adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
720 adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
722 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
723 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
725 err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
728 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
729 adev->gfx.mec2_fw->data;
730 adev->gfx.mec2_fw_version =
731 le32_to_cpu(cp_hdr->header.ucode_version);
732 adev->gfx.mec2_feature_version =
733 le32_to_cpu(cp_hdr->ucode_feature_version);
736 adev->gfx.mec2_fw = NULL;
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
                info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
                info->fw = adev->gfx.pfp_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
                info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
                info->fw = adev->gfx.me_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
                info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
                info->fw = adev->gfx.ce_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
                info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
                info->fw = adev->gfx.rlc_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                if (adev->gfx.rlc.is_rlc_v2_1 &&
                    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
                    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
                    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
                        info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
                        info->fw = adev->gfx.rlc_fw;
                        adev->firmware.fw_size +=
                                ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
                        info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
                        info->fw = adev->gfx.rlc_fw;
                        adev->firmware.fw_size +=
                                ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
                        info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
                        info->fw = adev->gfx.rlc_fw;
                        adev->firmware.fw_size +=
                                ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
                }

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
                info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
                info->fw = adev->gfx.mec_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes) -
                              le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
                info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
                info->fw = adev->gfx.mec_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

                if (adev->gfx.mec2_fw) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
                        info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
                        info->fw = adev->gfx.mec2_fw;
                        header = (const struct common_firmware_header *)info->fw->data;
                        cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(header->ucode_size_bytes) -
                                      le32_to_cpu(cp_hdr->jt_size) * 4,
                                      PAGE_SIZE);
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
                        info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
                        info->fw = adev->gfx.mec2_fw;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
                                      PAGE_SIZE);
                }
        }
828 "gfx10: Failed to load firmware \"%s\"\n",
830 release_firmware(adev->gfx.pfp_fw);
831 adev->gfx.pfp_fw = NULL;
832 release_firmware(adev->gfx.me_fw);
833 adev->gfx.me_fw = NULL;
834 release_firmware(adev->gfx.ce_fw);
835 adev->gfx.ce_fw = NULL;
836 release_firmware(adev->gfx.rlc_fw);
837 adev->gfx.rlc_fw = NULL;
838 release_firmware(adev->gfx.mec_fw);
839 adev->gfx.mec_fw = NULL;
840 release_firmware(adev->gfx.mec2_fw);
841 adev->gfx.mec2_fw = NULL;
844 gfx_v10_0_check_gfxoff_flag(adev);
static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
{
        u32 count = 0;
        const struct cs_section_def *sect = NULL;
        const struct cs_extent_def *ext = NULL;

        /* begin clear state */
        count += 2;
        /* context control state */
        count += 3;

        for (sect = gfx10_cs_data; sect->section != NULL; ++sect) {
                for (ext = sect->section; ext->extent != NULL; ++ext) {
                        if (sect->id == SECT_CONTEXT)
                                count += 2 + ext->reg_count;
                        else
                                return 0;
                }
        }

        /* set PA_SC_TILE_STEERING_OVERRIDE */
        count += 3;
        /* end clear state */
        count += 2;
        /* clear state */
        count += 2;

        return count;
}
static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
                                     volatile u32 *buffer)
{
        u32 count = 0, i;
        const struct cs_section_def *sect = NULL;
        const struct cs_extent_def *ext = NULL;
        int ctx_reg_offset;

        if (adev->gfx.rlc.cs_data == NULL)
                return;
        if (buffer == NULL)
                return;

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
        buffer[count++] = cpu_to_le32(0x80000000);
        buffer[count++] = cpu_to_le32(0x80000000);

        for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
                for (ext = sect->section; ext->extent != NULL; ++ext) {
                        if (sect->id == SECT_CONTEXT) {
                                buffer[count++] =
                                        cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
                                buffer[count++] = cpu_to_le32(ext->reg_index -
                                                PACKET3_SET_CONTEXT_REG_START);
                                for (i = 0; i < ext->reg_count; i++)
                                        buffer[count++] = cpu_to_le32(ext->extent[i]);
                        } else {
                                return;
                        }
                }
        }

        ctx_reg_offset =
                SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
        buffer[count++] = cpu_to_le32(ctx_reg_offset);
        buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
        buffer[count++] = cpu_to_le32(0);
}
static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev)
{
        /* clear state block */
        amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
                              &adev->gfx.rlc.clear_state_gpu_addr,
                              (void **)&adev->gfx.rlc.cs_ptr);

        /* jump table block */
        amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
                              &adev->gfx.rlc.cp_table_gpu_addr,
                              (void **)&adev->gfx.rlc.cp_table_ptr);
}
static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
{
        const struct cs_section_def *cs_data;
        int r;

        adev->gfx.rlc.cs_data = gfx10_cs_data;

        cs_data = adev->gfx.rlc.cs_data;

        if (cs_data) {
                /* init clear state block */
                r = amdgpu_gfx_rlc_init_csb(adev);
                if (r)
                        return r;
        }

        return 0;
}
static int gfx_v10_0_csb_vram_pin(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
        if (unlikely(r != 0))
                return r;

        r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
                          AMDGPU_GEM_DOMAIN_VRAM);
        if (!r)
                adev->gfx.rlc.clear_state_gpu_addr =
                        amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);

        amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

        return r;
}
static void gfx_v10_0_csb_vram_unpin(struct amdgpu_device *adev)
{
        int r;

        if (!adev->gfx.rlc.clear_state_obj)
                return;

        r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
        if (likely(r == 0)) {
                amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
                amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
        }
}
static void gfx_v10_0_mec_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
        amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}
static int gfx_v10_0_me_init(struct amdgpu_device *adev)
{
        int r;

        bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

        amdgpu_gfx_graphics_queue_acquire(adev);

        r = gfx_v10_0_init_microcode(adev);
        if (r)
                DRM_ERROR("Failed to load gfx firmware!\n");

        return r;
}
static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
{
        int r;
        u32 *hpd;
        const __le32 *fw_data = NULL;
        unsigned fw_size;
        u32 *fw = NULL;
        size_t mec_hpd_size;

        const struct gfx_firmware_header_v1_0 *mec_hdr = NULL;

        bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

        /* take ownership of the relevant compute queues */
        amdgpu_gfx_compute_queue_acquire(adev);
        mec_hpd_size = adev->gfx.num_compute_rings * GFX10_MEC_HPD_SIZE;

        r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_GTT,
                                      &adev->gfx.mec.hpd_eop_obj,
                                      &adev->gfx.mec.hpd_eop_gpu_addr,
                                      (void **)&hpd);
        if (r) {
                dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
                gfx_v10_0_mec_fini(adev);
                return r;
        }

        memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

        amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
        amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
                mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

                fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
                          le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
                fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

                r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
                                              PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                              &adev->gfx.mec.mec_fw_obj,
                                              &adev->gfx.mec.mec_fw_gpu_addr,
                                              (void **)&fw);
                if (r) {
                        dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
                        gfx_v10_0_mec_fini(adev);
                        return r;
                }

                memcpy(fw, fw_data, fw_size);

                amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
                amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
        }

        return 0;
}
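/*
 * Wave state is read back through the SQ_IND_INDEX/SQ_IND_DATA indirect
 * register pair: the index selects a wave and a per-wave register, and the
 * data port returns its value (optionally auto-incrementing for bulk GPR
 * reads).
 */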
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
        WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
                     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
                     (address << SQ_IND_INDEX__INDEX__SHIFT));
        return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
                           uint32_t thread, uint32_t regno,
                           uint32_t num, uint32_t *out)
{
        WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
                     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
                     (regno << SQ_IND_INDEX__INDEX__SHIFT) |
                     (thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
                     (SQ_IND_INDEX__AUTO_INCR_MASK));
        while (num--)
                *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
        /* in gfx10 the SIMD_ID is specified as part of the INSTANCE
         * field when performing a select_se_sh so it should be
         * 0 here
         */
        WARN_ON(simd != 0);

        /* type 2 wave data */
        dst[(*no_fields)++] = 2;
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_INST_DW0);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
}
static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
                                      uint32_t wave, uint32_t start,
                                      uint32_t size, uint32_t *dst)
{
        WARN_ON(simd != 0);

        wave_read_regs(
                adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
                dst);
}

static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
                                      uint32_t wave, uint32_t thread,
                                      uint32_t start, uint32_t size,
                                      uint32_t *dst)
{
        wave_read_regs(
                adev, wave, thread,
                start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v10_0_select_me_pipe_q(struct amdgpu_device *adev,
                                       u32 me, u32 pipe, u32 q, u32 vm)
{
        nv_grbm_select(adev, me, pipe, q, vm);
}
static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
        .get_gpu_clock_counter = &gfx_v10_0_get_gpu_clock_counter,
        .select_se_sh = &gfx_v10_0_select_se_sh,
        .read_wave_data = &gfx_v10_0_read_wave_data,
        .read_wave_sgprs = &gfx_v10_0_read_wave_sgprs,
        .read_wave_vgprs = &gfx_v10_0_read_wave_vgprs,
        .select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
};
static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
{
        u32 gb_addr_config;

        adev->gfx.funcs = &gfx_v10_0_gfx_funcs;

        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
                adev->gfx.config.sc_hiz_tile_fifo_size = 0;
                adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
                gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
                break;
        default:
                BUG();
                break;
        }

        adev->gfx.config.gb_addr_config = gb_addr_config;

        adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
                        REG_GET_FIELD(adev->gfx.config.gb_addr_config,
                                      GB_ADDR_CONFIG, NUM_PIPES);

        adev->gfx.config.max_tile_pipes =
                adev->gfx.config.gb_addr_config_fields.num_pipes;

        adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
                        REG_GET_FIELD(adev->gfx.config.gb_addr_config,
                                      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
        adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
                        REG_GET_FIELD(adev->gfx.config.gb_addr_config,
                                      GB_ADDR_CONFIG, NUM_RB_PER_SE);
        adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
                        REG_GET_FIELD(adev->gfx.config.gb_addr_config,
                                      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
        adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
                        REG_GET_FIELD(adev->gfx.config.gb_addr_config,
                                      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
}
static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
                                   int me, int pipe, int queue)
{
        int r;
        struct amdgpu_ring *ring;
        unsigned int irq_type;

        ring = &adev->gfx.gfx_ring[ring_id];

        ring->me = me;
        ring->pipe = pipe;
        ring->queue = queue;

        ring->ring_obj = NULL;
        ring->use_doorbell = true;

        if (!ring_id)
                ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
        else
                ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
        sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

        irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
        r = amdgpu_ring_init(adev, ring, 1024,
                             &adev->gfx.eop_irq, irq_type);
        if (r)
                return r;

        return 0;
}
static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
                                       int mec, int pipe, int queue)
{
        int r;
        unsigned int irq_type;
        struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

        /* mec0 is me1 */
        ring->me = mec + 1;
        ring->pipe = pipe;
        ring->queue = queue;

        ring->ring_obj = NULL;
        ring->use_doorbell = true;
        ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
        ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
                             + (ring_id * GFX10_MEC_HPD_SIZE);
        sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

        irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
                   + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
                   + ring->pipe;

        /* type-2 packets are deprecated on MEC, use type-3 instead */
        r = amdgpu_ring_init(adev, ring, 1024,
                             &adev->gfx.eop_irq, irq_type);
        if (r)
                return r;

        return 0;
}
static int gfx_v10_0_sw_init(void *handle)
{
        int i, j, k, r, ring_id = 0;
        struct amdgpu_kiq *kiq;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI12:
                adev->gfx.me.num_me = 1;
                adev->gfx.me.num_pipe_per_me = 2;
                adev->gfx.me.num_queue_per_pipe = 1;
                adev->gfx.mec.num_mec = 2;
                adev->gfx.mec.num_pipe_per_mec = 4;
                adev->gfx.mec.num_queue_per_pipe = 8;
                break;
        case CHIP_NAVI14:
                adev->gfx.me.num_me = 1;
                adev->gfx.me.num_pipe_per_me = 1;
                adev->gfx.me.num_queue_per_pipe = 1;
                adev->gfx.mec.num_mec = 1;
                adev->gfx.mec.num_pipe_per_mec = 4;
                adev->gfx.mec.num_queue_per_pipe = 8;
                break;
        default:
                break;
        }

        /* KIQ event */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
                              GFX_10_1__SRCID__CP_IB2_INTERRUPT_PKT,
                              &adev->gfx.kiq.irq);
        if (r)
                return r;

        /* EOP Event */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
                              GFX_10_1__SRCID__CP_EOP_INTERRUPT,
                              &adev->gfx.eop_irq);
        if (r)
                return r;

        /* Privileged reg */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_REG_FAULT,
                              &adev->gfx.priv_reg_irq);
        if (r)
                return r;

        /* Privileged inst */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_INSTR_FAULT,
                              &adev->gfx.priv_inst_irq);
        if (r)
                return r;

        adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

        gfx_v10_0_scratch_init(adev);

        r = gfx_v10_0_me_init(adev);
        if (r)
                return r;

        r = gfx_v10_0_rlc_init(adev);
        if (r) {
                DRM_ERROR("Failed to init rlc BOs!\n");
                return r;
        }

        r = gfx_v10_0_mec_init(adev);
        if (r) {
                DRM_ERROR("Failed to init MEC BOs!\n");
                return r;
        }

        /* set up the gfx ring */
        for (i = 0; i < adev->gfx.me.num_me; i++) {
                for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
                        for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
                                if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
                                        continue;

                                r = gfx_v10_0_gfx_ring_init(adev, ring_id,
                                                            i, k, j);
                                if (r)
                                        return r;
                                ring_id++;
                        }
                }
        }

        ring_id = 0;
        /* set up the compute queues - allocate horizontally across pipes */
        for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
                for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
                        for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
                                if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k,
                                                                     j))
                                        continue;

                                r = gfx_v10_0_compute_ring_init(adev, ring_id,
                                                                i, k, j);
                                if (r)
                                        return r;

                                ring_id++;
                        }
                }
        }

        r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE);
        if (r) {
                DRM_ERROR("Failed to init KIQ BOs!\n");
                return r;
        }

        kiq = &adev->gfx.kiq;
        r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
        if (r)
                return r;

        r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd));
        if (r)
                return r;

        /* allocate visible FB for rlc auto-loading fw */
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
                r = gfx_v10_0_rlc_backdoor_autoload_buffer_init(adev);
                if (r)
                        return r;
        }

        adev->gfx.ce_ram_size = F32_CE_PROGRAM_RAM_SIZE;

        gfx_v10_0_gpu_early_init(adev);

        return 0;
}
static void gfx_v10_0_pfp_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
                              &adev->gfx.pfp.pfp_fw_gpu_addr,
                              (void **)&adev->gfx.pfp.pfp_fw_ptr);
}

static void gfx_v10_0_ce_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->gfx.ce.ce_fw_obj,
                              &adev->gfx.ce.ce_fw_gpu_addr,
                              (void **)&adev->gfx.ce.ce_fw_ptr);
}

static void gfx_v10_0_me_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
                              &adev->gfx.me.me_fw_gpu_addr,
                              (void **)&adev->gfx.me.me_fw_ptr);
}
static int gfx_v10_0_sw_fini(void *handle)
{
        int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

        amdgpu_gfx_mqd_sw_fini(adev);
        amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
        amdgpu_gfx_kiq_fini(adev);

        gfx_v10_0_pfp_fini(adev);
        gfx_v10_0_ce_fini(adev);
        gfx_v10_0_me_fini(adev);
        gfx_v10_0_rlc_fini(adev);
        gfx_v10_0_mec_fini(adev);

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
                gfx_v10_0_rlc_backdoor_autoload_buffer_fini(adev);

        gfx_v10_0_free_microcode(adev);

        return 0;
}
static void gfx_v10_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
        /* TODO */
}

static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
                                   u32 sh_num, u32 instance)
{
        u32 data;

        if (instance == 0xffffffff)
                data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
                                     INSTANCE_BROADCAST_WRITES, 1);
        else
                data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
                                     instance);

        if (se_num == 0xffffffff)
                data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
                                     1);
        else
                data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

        if (sh_num == 0xffffffff)
                data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
                                     1);
        else
                data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

        WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}
static u32 gfx_v10_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
        u32 data, mask;

        data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
        data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);

        data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
        data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

        mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
                                         adev->gfx.config.max_sh_per_se);

        return (~data) & mask;
}
static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
{
        int i, j;
        u32 data;
        u32 active_rbs = 0;
        u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
                                     adev->gfx.config.max_sh_per_se;

        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
                        gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
                        data = gfx_v10_0_get_rb_active_bitmap(adev);
                        active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
                                               rb_bitmap_width_per_sh);
                }
        }
        gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);

        adev->gfx.config.backend_enable_mask = active_rbs;
        adev->gfx.config.num_rbs = hweight32(active_rbs);
}
static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *adev)
{
        uint32_t num_sc;
        uint32_t enabled_rb_per_sh;
        uint32_t active_rb_bitmap;
        uint32_t num_rb_per_sc;
        uint32_t num_packer_per_sc;
        uint32_t pa_sc_tile_steering_override;

        /* init num_sc */
        num_sc = adev->gfx.config.max_shader_engines * adev->gfx.config.max_sh_per_se *
                 adev->gfx.config.num_sc_per_sh;
        /* init num_rb_per_sc */
        active_rb_bitmap = gfx_v10_0_get_rb_active_bitmap(adev);
        enabled_rb_per_sh = hweight32(active_rb_bitmap);
        num_rb_per_sc = enabled_rb_per_sh / adev->gfx.config.num_sc_per_sh;
        /* init num_packer_per_sc */
        num_packer_per_sc = adev->gfx.config.num_packer_per_sc;

        pa_sc_tile_steering_override = 0;
        pa_sc_tile_steering_override |=
                (order_base_2(num_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_SC__SHIFT) &
                PA_SC_TILE_STEERING_OVERRIDE__NUM_SC_MASK;
        pa_sc_tile_steering_override |=
                (order_base_2(num_rb_per_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC__SHIFT) &
                PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC_MASK;
        pa_sc_tile_steering_override |=
                (order_base_2(num_packer_per_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC__SHIFT) &
                PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC_MASK;

        return pa_sc_tile_steering_override;
}
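/*
 * Worked example (hypothetical numbers): a config with 2 SEs, 2 SAs per SE
 * and 1 SC per SA gives num_sc = 4, so NUM_SC holds order_base_2(4) = 2;
 * with 2 enabled RBs per SA, num_rb_per_sc = 2 and NUM_RB_PER_SC holds
 * order_base_2(2) = 1.
 */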
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
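/*
 * VMIDs FIRST_COMPUTE_VMID..LAST_COMPUTE_VMID-1 (8..15) are set aside for
 * compute; the loop below programs the same 0x6000 base into both the
 * private (low 16 bits) and shared (high 16 bits) aperture fields of
 * SH_MEM_BASES for each of them.
 */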
static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
{
        int i;
        uint32_t sh_mem_bases;

        /*
         * Configure apertures:
         * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
         * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
         * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
         */
        sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

        mutex_lock(&adev->srbm_mutex);
        for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
                nv_grbm_select(adev, 0, 0, 0, i);
                /* CP and shaders */
                WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
                WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
        }
        nv_grbm_select(adev, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);

        /* Initialize all compute VMIDs to have no GDS, GWS, or OA
           access. These should be enabled by FW for target VMIDs. */
        for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
        }
}
static void gfx_v10_0_init_gds_vmid(struct amdgpu_device *adev)
{
        int vmid;

        /*
         * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
         * access. Compute VMIDs should be enabled by FW for target VMIDs,
         * the driver can enable them for graphics. VMID0 should maintain
         * access so that HWS firmware can save/restore entries.
         */
        for (vmid = 1; vmid < 16; vmid++) {
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
        }
}
static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
{
        int i, j, k;
        int max_wgp_per_sh = adev->gfx.config.max_cu_per_sh >> 1;
        u32 tmp, wgp_active_bitmap = 0;
        u32 gcrd_targets_disable_tcp = 0;
        u32 utcl_invreq_disable = 0;
        /*
         * GCRD_TARGETS_DISABLE field contains
         * for Navi10/Navi12: GL1C=[18:15], SQC=[14:10], TCP=[9:0]
         * for Navi14: GL1C=[21:18], SQC=[17:12], TCP=[11:0]
         */
        u32 gcrd_targets_disable_mask = amdgpu_gfx_create_bitmask(
                2 * max_wgp_per_sh + /* TCP */
                max_wgp_per_sh + /* SQC */
                4); /* GL1C */
        /*
         * UTCL1_UTCL0_INVREQ_DISABLE field contains
         * for Navi10/Navi12: SQG=[24], RMI=[23:20], SQC=[19:10], TCP=[9:0]
         * for Navi14: SQG=[28], RMI=[27:24], SQC=[23:12], TCP=[11:0]
         */
        u32 utcl_invreq_disable_mask = amdgpu_gfx_create_bitmask(
                2 * max_wgp_per_sh + /* TCP */
                2 * max_wgp_per_sh + /* SQC */
                4 + /* RMI */
                1); /* SQG */

        if (adev->asic_type == CHIP_NAVI10 ||
            adev->asic_type == CHIP_NAVI14 ||
            adev->asic_type == CHIP_NAVI12) {
                mutex_lock(&adev->grbm_idx_mutex);
                for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                        for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
                                gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
                                wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
                                /*
                                 * Set corresponding TCP bits for the inactive WGPs in
                                 * GCRD_SA_TARGETS_DISABLE
                                 */
                                gcrd_targets_disable_tcp = 0;
                                /* Set TCP & SQC bits in UTCL1_UTCL0_INVREQ_DISABLE */
                                utcl_invreq_disable = 0;

                                for (k = 0; k < max_wgp_per_sh; k++) {
                                        if (!(wgp_active_bitmap & (1 << k))) {
                                                gcrd_targets_disable_tcp |= 3 << (2 * k);
                                                utcl_invreq_disable |= (3 << (2 * k)) |
                                                        (3 << (2 * (max_wgp_per_sh + k)));
                                        }
                                }

                                tmp = RREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE);
                                /* only override TCP & SQC bits */
                                tmp &= 0xffffffff << (4 * max_wgp_per_sh);
                                tmp |= (utcl_invreq_disable & utcl_invreq_disable_mask);
                                WREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE, tmp);

                                tmp = RREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE);
                                /* only override TCP bits */
                                tmp &= 0xffffffff << (2 * max_wgp_per_sh);
                                tmp |= (gcrd_targets_disable_tcp & gcrd_targets_disable_mask);
                                WREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE, tmp);
                        }
                }
                gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
                mutex_unlock(&adev->grbm_idx_mutex);
        }
}
static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
{
        u32 tmp;
        int i;

        WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

        gfx_v10_0_tiling_mode_table_init(adev);

        gfx_v10_0_setup_rb(adev);
        gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
        adev->gfx.config.pa_sc_tile_steering_override =
                gfx_v10_0_init_pa_sc_tile_steering_override(adev);

        /* XXX SH_MEM regs */
        /* where to put LDS, scratch, GPUVM in FSA64 space */
        mutex_lock(&adev->srbm_mutex);
        for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
                nv_grbm_select(adev, 0, 0, 0, i);
                /* CP and shaders */
                WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
                if (i != 0) {
                        tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
                                (adev->gmc.private_aperture_start >> 48));
                        tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
                                (adev->gmc.shared_aperture_start >> 48));
                        WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
                }
        }
        nv_grbm_select(adev, 0, 0, 0, 0);

        mutex_unlock(&adev->srbm_mutex);

        gfx_v10_0_init_compute_vmid(adev);
        gfx_v10_0_init_gds_vmid(adev);
}
1722 static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1725 u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
1727 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
1729 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
1731 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
1733 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
1736 WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
1739 static void gfx_v10_0_init_csb(struct amdgpu_device *adev)
1742 WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
1743 adev->gfx.rlc.clear_state_gpu_addr >> 32);
1744 WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
1745 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1746 WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
1749 static void gfx_v10_0_init_pg(struct amdgpu_device *adev)
1751 gfx_v10_0_init_csb(adev);
1753 amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
1755 /* TODO: init power gating */
1759 void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
1761 u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
1763 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
1764 WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
1767 static void gfx_v10_0_rlc_reset(struct amdgpu_device *adev)
1769 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
1771 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
1775 static void gfx_v10_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
1778 uint32_t rlc_pg_cntl;
1780 rlc_pg_cntl = RREG32_SOC15(GC, 0, mmRLC_PG_CNTL);
1783 /* RLC_PG_CNTL[23] = 0 (default)
1784 * RLC will wait for handshake acks with SMU
1785 * GFXOFF will be enabled
1786 * RLC_PG_CNTL[23] = 1
1787 * RLC will not issue any message to SMU
1788 * hence no handshake between SMU & RLC
1789 * GFXOFF will be disabled
1791 rlc_pg_cntl |= 0x800000;
1793 rlc_pg_cntl &= ~0x800000;
1794 WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, rlc_pg_cntl);
1797 static void gfx_v10_0_rlc_start(struct amdgpu_device *adev)
1799 /* TODO: keep the rlc & smu handshake disabled until the smu
1800 * and gfxoff features work as expected */
1801 if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
1802 gfx_v10_0_rlc_smu_handshake_cntl(adev, false);
1804 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
1808 static void gfx_v10_0_rlc_enable_srm(struct amdgpu_device *adev)
1812 /* enable Save Restore Machine */
1813 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
1814 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1815 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1816 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
1819 static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev)
1821 const struct rlc_firmware_header_v2_0 *hdr;
1822 const __le32 *fw_data;
1823 unsigned i, fw_size;
1825 if (!adev->gfx.rlc_fw)
1828 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1829 amdgpu_ucode_print_rlc_hdr(&hdr->header);
1831 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1832 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1833 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1835 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
1836 RLCG_UCODE_LOADING_START_ADDRESS);
1838 for (i = 0; i < fw_size; i++)
1839 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA,
1840 le32_to_cpup(fw_data++));
1842 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
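/*
 * The ADDR/DATA pair above is an auto-incrementing register port: ADDR
 * is written once with the load start address, each DATA write pushes
 * one dword and advances the address, and the trailing ADDR write
 * records the ucode version (the usual convention in this driver).
 */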
1847 static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
1851 if (amdgpu_sriov_vf(adev))
1854 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1855 r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
1858 gfx_v10_0_init_pg(adev);
1860 /* enable RLC SRM */
1861 gfx_v10_0_rlc_enable_srm(adev);
1864 adev->gfx.rlc.funcs->stop(adev);
1867 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
1870 WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
1872 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1873 /* legacy rlc firmware loading */
1874 r = gfx_v10_0_rlc_load_microcode(adev);
1877 } else if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1878 /* rlc backdoor autoload firmware */
1879 r = gfx_v10_0_rlc_backdoor_autoload_enable(adev);
1884 gfx_v10_0_init_pg(adev);
1885 adev->gfx.rlc.funcs->start(adev);
1887 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1888 r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
1898 unsigned int offset;
1900 } rlc_autoload_info[FIRMWARE_ID_MAX];
1902 static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
1905 RLC_TABLE_OF_CONTENT *rlc_toc;
1907 ret = amdgpu_bo_create_reserved(adev, adev->psp.toc_bin_size, PAGE_SIZE,
1908 AMDGPU_GEM_DOMAIN_GTT,
1909 &adev->gfx.rlc.rlc_toc_bo,
1910 &adev->gfx.rlc.rlc_toc_gpu_addr,
1911 (void **)&adev->gfx.rlc.rlc_toc_buf);
1913 dev_err(adev->dev, "(%d) failed to create rlc toc bo\n", ret);
1917 /* Copy toc from psp sos fw to rlc toc buffer */
1918 memcpy(adev->gfx.rlc.rlc_toc_buf, adev->psp.toc_start_addr, adev->psp.toc_bin_size);
1920 rlc_toc = (RLC_TABLE_OF_CONTENT *)adev->gfx.rlc.rlc_toc_buf;
1921 while (rlc_toc && (rlc_toc->id > FIRMWARE_ID_INVALID) &&
1922 (rlc_toc->id < FIRMWARE_ID_MAX)) {
1923 if ((rlc_toc->id >= FIRMWARE_ID_CP_CE) &&
1924 (rlc_toc->id <= FIRMWARE_ID_CP_MES)) {
1925 /* Offset needs 4KB alignment */
1926 rlc_toc->offset = ALIGN(rlc_toc->offset * 4, PAGE_SIZE);
1929 rlc_autoload_info[rlc_toc->id].id = rlc_toc->id;
1930 rlc_autoload_info[rlc_toc->id].offset = rlc_toc->offset * 4;
1931 rlc_autoload_info[rlc_toc->id].size = rlc_toc->size * 4;
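/*
 * The * 4 conversions above scale the dword-based TOC fields to byte
 * offsets and sizes; CP firmware entries additionally have their
 * offset aligned up to a 4KB page, presumably so the 4KB-aligned
 * CP_*_IC_BASE registers can later point straight at them.
 */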
1939 static uint32_t gfx_v10_0_calc_toc_total_size(struct amdgpu_device *adev)
1941 uint32_t total_size = 0;
1945 ret = gfx_v10_0_parse_rlc_toc(adev);
1947 dev_err(adev->dev, "failed to parse rlc toc\n");
1951 for (id = FIRMWARE_ID_RLC_G_UCODE; id < FIRMWARE_ID_MAX; id++)
1952 total_size += rlc_autoload_info[id].size;
1954 /* The last TOC entry's offset may have been aligned up past the summed sizes */
1955 if (total_size < rlc_autoload_info[FIRMWARE_ID_MAX-1].offset)
1956 total_size = rlc_autoload_info[FIRMWARE_ID_MAX-1].offset +
1957 rlc_autoload_info[FIRMWARE_ID_MAX-1].size;
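/*
 * Hypothetical numbers to illustrate the check above: if the summed
 * sizes come to 0x6200 bytes but alignment pushed the last TOC entry
 * to offset 0x7000 with size 0x400, the buffer must be 0x7400 bytes.
 */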
1962 static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev)
1965 uint32_t total_size;
1967 total_size = gfx_v10_0_calc_toc_total_size(adev);
1969 r = amdgpu_bo_create_reserved(adev, total_size, PAGE_SIZE,
1970 AMDGPU_GEM_DOMAIN_GTT,
1971 &adev->gfx.rlc.rlc_autoload_bo,
1972 &adev->gfx.rlc.rlc_autoload_gpu_addr,
1973 (void **)&adev->gfx.rlc.rlc_autoload_ptr);
1975 dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
1982 static void gfx_v10_0_rlc_backdoor_autoload_buffer_fini(struct amdgpu_device *adev)
1984 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_toc_bo,
1985 &adev->gfx.rlc.rlc_toc_gpu_addr,
1986 (void **)&adev->gfx.rlc.rlc_toc_buf);
1987 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
1988 &adev->gfx.rlc.rlc_autoload_gpu_addr,
1989 (void **)&adev->gfx.rlc.rlc_autoload_ptr);
1992 static void gfx_v10_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
1994 const void *fw_data,
1997 uint32_t toc_offset;
1998 uint32_t toc_fw_size;
1999 char *ptr = adev->gfx.rlc.rlc_autoload_ptr;
2001 if (id <= FIRMWARE_ID_INVALID || id >= FIRMWARE_ID_MAX)
2004 toc_offset = rlc_autoload_info[id].offset;
2005 toc_fw_size = rlc_autoload_info[id].size;
2008 fw_size = toc_fw_size;
2010 if (fw_size > toc_fw_size)
2011 fw_size = toc_fw_size;
2013 memcpy(ptr + toc_offset, fw_data, fw_size);
2015 if (fw_size < toc_fw_size)
2016 memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
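/*
 * The helper above clamps the copy to the slot size recorded in the
 * TOC and zero-fills any remainder, so callers may safely pass an
 * image smaller than its TOC slot without leaving stale bytes behind.
 */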
2019 static void gfx_v10_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev)
2024 data = adev->gfx.rlc.rlc_toc_buf;
2025 size = rlc_autoload_info[FIRMWARE_ID_RLC_TOC].size;
2027 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2028 FIRMWARE_ID_RLC_TOC,
2032 static void gfx_v10_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev)
2034 const __le32 *fw_data;
2036 const struct gfx_firmware_header_v1_0 *cp_hdr;
2037 const struct rlc_firmware_header_v2_0 *rlc_hdr;
2040 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
2041 adev->gfx.pfp_fw->data;
2042 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2043 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
2044 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
2045 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2050 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
2051 adev->gfx.ce_fw->data;
2052 fw_data = (const __le32 *)(adev->gfx.ce_fw->data +
2053 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
2054 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
2055 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2060 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
2061 adev->gfx.me_fw->data;
2062 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2063 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
2064 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
2065 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2070 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
2071 adev->gfx.rlc_fw->data;
2072 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2073 le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
2074 fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
2075 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2076 FIRMWARE_ID_RLC_G_UCODE,
2080 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
2081 adev->gfx.mec_fw->data;
2082 fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
2083 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
2084 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
2085 cp_hdr->jt_size * 4;
2086 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2089 /* mec2 ucode is not necessary if it is the same as mec1 */
2092 /* Temporarily put the sdma part here */
2093 static void gfx_v10_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev)
2095 const __le32 *fw_data;
2097 const struct sdma_firmware_header_v1_0 *sdma_hdr;
2100 for (i = 0; i < adev->sdma.num_instances; i++) {
2101 sdma_hdr = (const struct sdma_firmware_header_v1_0 *)
2102 adev->sdma.instance[i].fw->data;
2103 fw_data = (const __le32 *) (adev->sdma.instance[i].fw->data +
2104 le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
2105 fw_size = le32_to_cpu(sdma_hdr->header.ucode_size_bytes);
2108 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2109 FIRMWARE_ID_SDMA0_UCODE, fw_data, fw_size);
2110 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2111 FIRMWARE_ID_SDMA0_JT,
2112 (uint32_t *)fw_data +
2113 sdma_hdr->jt_offset,
2114 sdma_hdr->jt_size * 4);
2115 } else if (i == 1) {
2116 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2117 FIRMWARE_ID_SDMA1_UCODE, fw_data, fw_size);
2118 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2119 FIRMWARE_ID_SDMA1_JT,
2120 (uint32_t *)fw_data +
2121 sdma_hdr->jt_offset,
2122 sdma_hdr->jt_size * 4);
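/*
 * Backdoor autoload in outline: the copy helpers above stage the TOC,
 * SDMA and GFX images into a single GTT buffer, and the function below
 * then programs RLC_HYP_BOOTLOAD_ADDR/_SIZE so the RLC bootloader can
 * pull each image into place itself; the register checks only
 * sanity-check that the ROM is in a state where that can happen.
 */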
2127 static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
2129 uint32_t rlc_g_offset, rlc_g_size, tmp;
2132 gfx_v10_0_rlc_backdoor_autoload_copy_toc_ucode(adev);
2133 gfx_v10_0_rlc_backdoor_autoload_copy_sdma_ucode(adev);
2134 gfx_v10_0_rlc_backdoor_autoload_copy_gfx_ucode(adev);
2136 rlc_g_offset = rlc_autoload_info[FIRMWARE_ID_RLC_G_UCODE].offset;
2137 rlc_g_size = rlc_autoload_info[FIRMWARE_ID_RLC_G_UCODE].size;
2138 gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;
2140 WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_ADDR_HI, upper_32_bits(gpu_addr));
2141 WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_ADDR_LO, lower_32_bits(gpu_addr));
2142 WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_SIZE, rlc_g_size);
2144 tmp = RREG32_SOC15(GC, 0, mmRLC_HYP_RESET_VECTOR);
2145 if (!(tmp & (RLC_HYP_RESET_VECTOR__COLD_BOOT_EXIT_MASK |
2146 RLC_HYP_RESET_VECTOR__VDDGFX_EXIT_MASK))) {
2147 DRM_ERROR("Neither COLD_BOOT_EXIT nor VDDGFX_EXIT is set\n");
2151 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
2152 if (tmp & RLC_CNTL__RLC_ENABLE_F32_MASK) {
2153 DRM_ERROR("RLC ROM should halt itself\n");
2160 static int gfx_v10_0_rlc_backdoor_autoload_config_me_cache(struct amdgpu_device *adev)
2162 uint32_t usec_timeout = 50000; /* wait for 50ms */
2167 /* Trigger an invalidation of the L1 instruction caches */
2168 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
2169 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2170 WREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL, tmp);
2172 /* Wait for invalidation complete */
2173 for (i = 0; i < usec_timeout; i++) {
2174 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
2175 if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2176 INVALIDATE_CACHE_COMPLETE) == 1)
2181 if (i >= usec_timeout) {
2182 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2186 /* Program the me ucode address into the instruction cache address register */
2187 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2188 rlc_autoload_info[FIRMWARE_ID_CP_ME].offset;
2189 WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_LO,
2190 lower_32_bits(addr) & 0xFFFFF000);
2191 WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI,
2192 upper_32_bits(addr));
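/*
 * The CE, PFP and MEC variants below follow the same three-step recipe
 * as the ME one above: trigger INVALIDATE_CACHE, poll
 * INVALIDATE_CACHE_COMPLETE for up to ~50ms, then point IC_BASE_LO/_HI
 * at the ucode's 4KB-aligned offset inside the autoload buffer.
 */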
2197 static int gfx_v10_0_rlc_backdoor_autoload_config_ce_cache(struct amdgpu_device *adev)
2199 uint32_t usec_timeout = 50000; /* wait for 50ms */
2204 /* Trigger an invalidation of the L1 instruction caches */
2205 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2206 tmp = REG_SET_FIELD(tmp, CP_CE_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2207 WREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL, tmp);
2209 /* Wait for invalidation complete */
2210 for (i = 0; i < usec_timeout; i++) {
2211 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2212 if (REG_GET_FIELD(tmp, CP_CE_IC_OP_CNTL,
2213 INVALIDATE_CACHE_COMPLETE) == 1)
2218 if (i >= usec_timeout) {
2219 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2223 /* Program the ce ucode address into the instruction cache address register */
2224 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2225 rlc_autoload_info[FIRMWARE_ID_CP_CE].offset;
2226 WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_LO,
2227 lower_32_bits(addr) & 0xFFFFF000);
2228 WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI,
2229 upper_32_bits(addr));
2234 static int gfx_v10_0_rlc_backdoor_autoload_config_pfp_cache(struct amdgpu_device *adev)
2236 uint32_t usec_timeout = 50000; /* wait for 50ms */
2241 /* Trigger an invalidation of the L1 instruction caches */
2242 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2243 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2244 WREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL, tmp);
2246 /* Wait for invalidation complete */
2247 for (i = 0; i < usec_timeout; i++) {
2248 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2249 if (REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2250 INVALIDATE_CACHE_COMPLETE) == 1)
2255 if (i >= usec_timeout) {
2256 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2260 /* Program the pfp ucode address into the instruction cache address register */
2261 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2262 rlc_autoload_info[FIRMWARE_ID_CP_PFP].offset;
2263 WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_LO,
2264 lower_32_bits(addr) & 0xFFFFF000);
2265 WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI,
2266 upper_32_bits(addr));
2271 static int gfx_v10_0_rlc_backdoor_autoload_config_mec_cache(struct amdgpu_device *adev)
2273 uint32_t usec_timeout = 50000; /* wait for 50ms */
2278 /* Trigger an invalidation of the L1 instruction caches */
2279 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2280 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2281 WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);
2283 /* Wait for invalidation complete */
2284 for (i = 0; i < usec_timeout; i++) {
2285 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2286 if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2287 INVALIDATE_CACHE_COMPLETE) == 1)
2292 if (i >= usec_timeout) {
2293 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2297 /* Program the mec1 ucode address into the instruction cache address register */
2298 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2299 rlc_autoload_info[FIRMWARE_ID_CP_MEC].offset;
2300 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2301 lower_32_bits(addr) & 0xFFFFF000);
2302 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2303 upper_32_bits(addr));
2308 static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
2311 uint32_t bootload_status;
2314 for (i = 0; i < adev->usec_timeout; i++) {
2315 cp_status = RREG32_SOC15(GC, 0, mmCP_STAT);
2316 bootload_status = RREG32_SOC15(GC, 0, mmRLC_RLCS_BOOTLOAD_STATUS);
2317 if ((cp_status == 0) &&
2318 (REG_GET_FIELD(bootload_status,
2319 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
2325 if (i >= adev->usec_timeout) {
2326 dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
2330 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2331 r = gfx_v10_0_rlc_backdoor_autoload_config_me_cache(adev);
2335 r = gfx_v10_0_rlc_backdoor_autoload_config_ce_cache(adev);
2339 r = gfx_v10_0_rlc_backdoor_autoload_config_pfp_cache(adev);
2343 r = gfx_v10_0_rlc_backdoor_autoload_config_mec_cache(adev);
2351 static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2354 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2356 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2357 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2358 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2360 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2361 adev->gfx.gfx_ring[i].sched.ready = false;
2363 WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
2367 static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
2370 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2371 const __le32 *fw_data;
2372 unsigned i, fw_size;
2374 uint32_t usec_timeout = 50000; /* wait for 50ms */
2376 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2377 adev->gfx.pfp_fw->data;
2379 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2381 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2382 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2383 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);
2385 r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
2386 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2387 &adev->gfx.pfp.pfp_fw_obj,
2388 &adev->gfx.pfp.pfp_fw_gpu_addr,
2389 (void **)&adev->gfx.pfp.pfp_fw_ptr);
2391 dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
2392 gfx_v10_0_pfp_fini(adev);
2396 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);
2398 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
2399 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
2401 /* Trigger an invalidation of the L1 instruction caches */
2402 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2403 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2404 WREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL, tmp);
2406 /* Wait for invalidation complete */
2407 for (i = 0; i < usec_timeout; i++) {
2408 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2409 if (REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2410 INVALIDATE_CACHE_COMPLETE) == 1)
2415 if (i >= usec_timeout) {
2416 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2420 if (amdgpu_emu_mode == 1)
2421 adev->nbio_funcs->hdp_flush(adev, NULL);
2423 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
2424 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2425 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2426 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2427 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2428 WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL, tmp);
2429 WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_LO,
2430 adev->gfx.pfp.pfp_fw_gpu_addr & 0xFFFFF000);
2431 WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI,
2432 upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
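/*
 * Unlike the autoload path, direct loading gives each CP block its own
 * GTT BO holding the raw ucode image; masking IC_BASE_LO with
 * 0xFFFFF000 is harmless here because the BO was created page-aligned.
 */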
2437 static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
2440 const struct gfx_firmware_header_v1_0 *ce_hdr;
2441 const __le32 *fw_data;
2442 unsigned i, fw_size;
2444 uint32_t usec_timeout = 50000; /* wait for 50ms */
2446 ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2447 adev->gfx.ce_fw->data;
2449 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2451 fw_data = (const __le32 *)(adev->gfx.ce_fw->data +
2452 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2453 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes);
2455 r = amdgpu_bo_create_reserved(adev, ce_hdr->header.ucode_size_bytes,
2456 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2457 &adev->gfx.ce.ce_fw_obj,
2458 &adev->gfx.ce.ce_fw_gpu_addr,
2459 (void **)&adev->gfx.ce.ce_fw_ptr);
2461 dev_err(adev->dev, "(%d) failed to create ce fw bo\n", r);
2462 gfx_v10_0_ce_fini(adev);
2466 memcpy(adev->gfx.ce.ce_fw_ptr, fw_data, fw_size);
2468 amdgpu_bo_kunmap(adev->gfx.ce.ce_fw_obj);
2469 amdgpu_bo_unreserve(adev->gfx.ce.ce_fw_obj);
2471 /* Trigger an invalidation of the L1 instruction caches */
2472 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2473 tmp = REG_SET_FIELD(tmp, CP_CE_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2474 WREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL, tmp);
2476 /* Wait for invalidation complete */
2477 for (i = 0; i < usec_timeout; i++) {
2478 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2479 if (REG_GET_FIELD(tmp, CP_CE_IC_OP_CNTL,
2480 INVALIDATE_CACHE_COMPLETE) == 1)
2485 if (i >= usec_timeout) {
2486 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2490 if (amdgpu_emu_mode == 1)
2491 adev->nbio_funcs->hdp_flush(adev, NULL);
2493 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
2494 tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
2495 tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, CACHE_POLICY, 0);
2496 tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, EXE_DISABLE, 0);
2497 tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2498 WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_LO,
2499 adev->gfx.ce.ce_fw_gpu_addr & 0xFFFFF000);
2500 WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI,
2501 upper_32_bits(adev->gfx.ce.ce_fw_gpu_addr));
2506 static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
2509 const struct gfx_firmware_header_v1_0 *me_hdr;
2510 const __le32 *fw_data;
2511 unsigned i, fw_size;
2513 uint32_t usec_timeout = 50000; /* wait for 50ms */
2515 me_hdr = (const struct gfx_firmware_header_v1_0 *)
2516 adev->gfx.me_fw->data;
2518 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2520 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2521 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2522 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);
2524 r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
2525 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2526 &adev->gfx.me.me_fw_obj,
2527 &adev->gfx.me.me_fw_gpu_addr,
2528 (void **)&adev->gfx.me.me_fw_ptr);
2530 dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
2531 gfx_v10_0_me_fini(adev);
2535 memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);
2537 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
2538 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
2540 /* Trigger an invalidation of the L1 instruction caches */
2541 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
2542 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2543 WREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL, tmp);
2545 /* Wait for invalidation complete */
2546 for (i = 0; i < usec_timeout; i++) {
2547 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
2548 if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2549 INVALIDATE_CACHE_COMPLETE) == 1)
2554 if (i >= usec_timeout) {
2555 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2559 if (amdgpu_emu_mode == 1)
2560 adev->nbio_funcs->hdp_flush(adev, NULL);
2562 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
2563 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2564 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2565 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2566 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2567 WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_LO,
2568 adev->gfx.me.me_fw_gpu_addr & 0xFFFFF000);
2569 WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI,
2570 upper_32_bits(adev->gfx.me.me_fw_gpu_addr));
2575 static int gfx_v10_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2579 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2582 gfx_v10_0_cp_gfx_enable(adev, false);
2584 r = gfx_v10_0_cp_gfx_load_pfp_microcode(adev);
2586 dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
2590 r = gfx_v10_0_cp_gfx_load_ce_microcode(adev);
2592 dev_err(adev->dev, "(%d) failed to load ce fw\n", r);
2596 r = gfx_v10_0_cp_gfx_load_me_microcode(adev);
2598 dev_err(adev->dev, "(%d) failed to load me fw\n", r);
2605 static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
2607 struct amdgpu_ring *ring;
2608 const struct cs_section_def *sect = NULL;
2609 const struct cs_extent_def *ext = NULL;
2614 WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT,
2615 adev->gfx.config.max_hw_contexts - 1);
2616 WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2618 gfx_v10_0_cp_gfx_enable(adev, true);
2620 ring = &adev->gfx.gfx_ring[0];
2621 r = amdgpu_ring_alloc(ring, gfx_v10_0_get_csb_size(adev) + 4);
2623 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2627 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2628 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2630 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2631 amdgpu_ring_write(ring, 0x80000000);
2632 amdgpu_ring_write(ring, 0x80000000);
2634 for (sect = gfx10_cs_data; sect->section != NULL; ++sect) {
2635 for (ext = sect->section; ext->extent != NULL; ++ext) {
2636 if (sect->id == SECT_CONTEXT) {
2637 amdgpu_ring_write(ring,
2638 PACKET3(PACKET3_SET_CONTEXT_REG,
2640 amdgpu_ring_write(ring, ext->reg_index -
2641 PACKET3_SET_CONTEXT_REG_START);
2642 for (i = 0; i < ext->reg_count; i++)
2643 amdgpu_ring_write(ring, ext->extent[i]);
2649 SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
2650 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
2651 amdgpu_ring_write(ring, ctx_reg_offset);
2652 amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);
2654 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2655 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2657 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2658 amdgpu_ring_write(ring, 0);
2660 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2661 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2662 amdgpu_ring_write(ring, 0x8000);
2663 amdgpu_ring_write(ring, 0x8000);
2665 amdgpu_ring_commit(ring);
2667 /* submit cs packet to copy state 0 to the next available state */
2668 ring = &adev->gfx.gfx_ring[1];
2669 r = amdgpu_ring_alloc(ring, 2);
2671 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2675 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2676 amdgpu_ring_write(ring, 0);
2678 amdgpu_ring_commit(ring);
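/*
 * Ring 0 above carries the full clear-state setup (preamble, the
 * golden context registers from gfx10_cs_data, the tile steering
 * override and the CE partition bases); ring 1 only needs the
 * CLEAR_STATE packet, which copies state 0 into the next available
 * context.
 */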
2683 static void gfx_v10_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
2688 tmp = RREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL);
2689 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);
2691 WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, tmp);
2694 static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
2695 struct amdgpu_ring *ring)
2699 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2700 if (ring->use_doorbell) {
2701 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2702 DOORBELL_OFFSET, ring->doorbell_index);
2703 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2706 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2709 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2710 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2711 DOORBELL_RANGE_LOWER, ring->doorbell_index);
2712 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2714 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2715 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2718 static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
2720 struct amdgpu_ring *ring;
2723 u64 rb_addr, rptr_addr, wptr_gpu_addr;
2726 /* Set the write pointer delay */
2727 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2729 /* set the RB to use vmid 0 */
2730 WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2732 /* Init gfx ring 0 for pipe 0 */
2733 mutex_lock(&adev->srbm_mutex);
2734 gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
2735 mutex_unlock(&adev->srbm_mutex);
2736 /* Set ring buffer size */
2737 ring = &adev->gfx.gfx_ring[0];
2738 rb_bufsz = order_base_2(ring->ring_size / 8);
2739 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2740 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2742 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2744 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
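/*
 * Sizing sketch (example numbers): ring->ring_size is in bytes and
 * rb_bufsz is log2 of the size in 8-byte units, so a 64KB ring yields
 * order_base_2(65536 / 8) = 13; RB_BLKSZ is kept two steps smaller.
 */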
2746 /* Initialize the ring buffer's write pointers */
2748 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2749 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2751 /* set the wb address whether it's enabled or not */
2752 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2753 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2754 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
2755 CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2757 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2758 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
2759 lower_32_bits(wptr_gpu_addr));
2760 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
2761 upper_32_bits(wptr_gpu_addr));
2764 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2766 rb_addr = ring->gpu_addr >> 8;
2767 WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2768 WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2770 WREG32_SOC15(GC, 0, mmCP_RB_ACTIVE, 1);
2772 gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
2774 /* Init gfx ring 1 for pipe 1 */
2775 mutex_lock(&adev->srbm_mutex);
2776 gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
2777 mutex_unlock(&adev->srbm_mutex);
2778 ring = &adev->gfx.gfx_ring[1];
2779 rb_bufsz = order_base_2(ring->ring_size / 8);
2780 tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
2781 tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
2782 WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
2783 /* Initialize the ring buffer's write pointers */
2785 WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
2786 WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
2787 /* Set the wb address whether it's enabled or not */
2788 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2789 WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
2790 WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
2791 CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2792 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2793 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
2794 lower_32_bits(wptr_gpu_addr));
2795 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
2796 upper_32_bits(wptr_gpu_addr));
2799 WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
2801 rb_addr = ring->gpu_addr >> 8;
2802 WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
2803 WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
2804 WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
2806 gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
2808 /* Switch to pipe 0 */
2809 mutex_lock(&adev->srbm_mutex);
2810 gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
2811 mutex_unlock(&adev->srbm_mutex);
2813 /* start the ring */
2814 gfx_v10_0_cp_gfx_start(adev);
2816 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2817 ring = &adev->gfx.gfx_ring[i];
2818 ring->sched.ready = true;
2824 static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2829 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2831 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2832 (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
2833 CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2834 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2835 adev->gfx.compute_ring[i].sched.ready = false;
2836 adev->gfx.kiq.ring.sched.ready = false;
2841 static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2843 const struct gfx_firmware_header_v1_0 *mec_hdr;
2844 const __le32 *fw_data;
2847 u32 usec_timeout = 50000; /* Wait for 50 ms */
2849 if (!adev->gfx.mec_fw)
2852 gfx_v10_0_cp_compute_enable(adev, false);
2854 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2855 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2857 fw_data = (const __le32 *)
2858 (adev->gfx.mec_fw->data +
2859 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2861 /* Trigger an invalidation of the L1 instruction caches */
2862 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2863 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2864 WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);
2866 /* Wait for invalidation complete */
2867 for (i = 0; i < usec_timeout; i++) {
2868 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2869 if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2870 INVALIDATE_CACHE_COMPLETE) == 1)
2875 if (i >= usec_timeout) {
2876 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2880 if (amdgpu_emu_mode == 1)
2881 adev->nbio_funcs->hdp_flush(adev, NULL);
2883 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
2884 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2885 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2886 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2887 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2889 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr &
2891 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2892 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2895 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, 0);
2897 for (i = 0; i < mec_hdr->jt_size; i++)
2898 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
2899 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
2901 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
2904 * TODO: Loading MEC2 firmware is only necessary if MEC2 should run
2905 * different microcode than MEC1.
2911 static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
2914 struct amdgpu_device *adev = ring->adev;
2916 /* tell RLC which queue is the KIQ */
2917 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2919 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2920 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2922 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2925 static int gfx_v10_0_gfx_mqd_init(struct amdgpu_ring *ring)
2927 struct amdgpu_device *adev = ring->adev;
2928 struct v10_gfx_mqd *mqd = ring->mqd_ptr;
2929 uint64_t hqd_gpu_addr, wb_gpu_addr;
2933 /* set up gfx hqd wptr */
2934 mqd->cp_gfx_hqd_wptr = 0;
2935 mqd->cp_gfx_hqd_wptr_hi = 0;
2937 /* set the pointer to the MQD */
2938 mqd->cp_mqd_base_addr = ring->mqd_gpu_addr & 0xfffffffc;
2939 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2941 /* set up mqd control */
2942 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL);
2943 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
2944 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
2945 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
2946 mqd->cp_gfx_mqd_control = tmp;
2948 /* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
2949 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID);
2950 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
2951 mqd->cp_gfx_hqd_vmid = 0;
2953 /* set up default queue priority level
2954 * 0x0 = low priority, 0x1 = high priority */
2955 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY);
2956 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
2957 mqd->cp_gfx_hqd_queue_priority = tmp;
2959 /* set up time quantum */
2960 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM);
2961 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
2962 mqd->cp_gfx_hqd_quantum = tmp;
2964 /* set up gfx hqd base. this is similar to CP_RB_BASE */
2965 hqd_gpu_addr = ring->gpu_addr >> 8;
2966 mqd->cp_gfx_hqd_base = hqd_gpu_addr;
2967 mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);
2969 /* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */
2970 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2971 mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
2972 mqd->cp_gfx_hqd_rptr_addr_hi =
2973 upper_32_bits(wb_gpu_addr) & 0xffff;
2975 /* set up rb_wptr_poll addr */
2976 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2977 mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2978 mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2980 /* set up the gfx_hqd_control, similar to CP_RB0_CNTL */
2981 rb_bufsz = order_base_2(ring->ring_size / 4) - 1;
2982 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL);
2983 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
2984 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
2986 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
2988 mqd->cp_gfx_hqd_cntl = tmp;
2990 /* set up cp_doorbell_control */
2991 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2992 if (ring->use_doorbell) {
2993 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2994 DOORBELL_OFFSET, ring->doorbell_index);
2995 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2998 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3000 mqd->cp_rb_doorbell_control = tmp;
3002 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3004 mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR);
3006 /* activate the queue */
3007 mqd->cp_gfx_hqd_active = 1;
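/*
 * The MQD filled in above is a memory image of the HQD registers: it
 * is either written straight to the hardware (the BRING_UP_DEBUG path
 * below) or handed to the KIQ, which maps the queue from the
 * descriptor on the driver's behalf.
 */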
3012 #ifdef BRING_UP_DEBUG
3013 static int gfx_v10_0_gfx_queue_init_register(struct amdgpu_ring *ring)
3015 struct amdgpu_device *adev = ring->adev;
3016 struct v10_gfx_mqd *mqd = ring->mqd_ptr;
3018 /* set mmCP_GFX_HQD_WPTR/_HI to 0 */
3019 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr);
3020 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi);
3022 /* set GFX_MQD_BASE */
3023 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr);
3024 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
3026 /* set GFX_MQD_CONTROL */
3027 WREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control);
3029 /* set GFX_HQD_VMID to 0 */
3030 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid);
3032 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY,
3033 mqd->cp_gfx_hqd_queue_priority);
3034 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum);
3036 /* set GFX_HQD_BASE, similar as CP_RB_BASE */
3037 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base);
3038 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi);
3040 /* set GFX_HQD_RPTR_ADDR, similar as CP_RB_RPTR */
3041 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr);
3042 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi);
3044 /* set GFX_HQD_CNTL, similar as CP_RB_CNTL */
3045 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl);
3047 /* set RB_WPTR_POLL_ADDR */
3048 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo);
3049 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi);
3051 /* set RB_DOORBELL_CONTROL */
3052 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control);
3054 /* activate the queue */
3055 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active);
3061 static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
3063 struct amdgpu_device *adev = ring->adev;
3064 struct v10_gfx_mqd *mqd = ring->mqd_ptr;
3066 if (!adev->in_gpu_reset && !adev->in_suspend) {
3067 memset((void *)mqd, 0, sizeof(*mqd));
3068 mutex_lock(&adev->srbm_mutex);
3069 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3070 gfx_v10_0_gfx_mqd_init(ring);
3071 #ifdef BRING_UP_DEBUG
3072 gfx_v10_0_gfx_queue_init_register(ring);
3074 nv_grbm_select(adev, 0, 0, 0, 0);
3075 mutex_unlock(&adev->srbm_mutex);
3076 if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
3077 memcpy(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], mqd, sizeof(*mqd));
3078 } else if (adev->in_gpu_reset) {
3079 /* reset mqd with the backup copy */
3080 if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
3081 memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], sizeof(*mqd));
3082 /* reset the ring */
3084 amdgpu_ring_clear_ring(ring);
3085 #ifdef BRING_UP_DEBUG
3086 mutex_lock(&adev->srbm_mutex);
3087 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3088 gfx_v10_0_gfx_queue_init_register(ring);
3089 nv_grbm_select(adev, 0, 0, 0, 0);
3090 mutex_unlock(&adev->srbm_mutex);
3093 amdgpu_ring_clear_ring(ring);
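/*
 * Three cases above: a fresh init builds the MQD and snapshots it into
 * mqd_backup, a GPU reset restores the snapshot and clears the ring,
 * and a plain resume only clears the ring since the MQD still holds
 * valid contents.
 */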
3099 #ifndef BRING_UP_DEBUG
3100 static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
3102 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
3103 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
3106 if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
3109 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
3110 adev->gfx.num_gfx_rings);
3112 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3116 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3117 kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
3119 r = amdgpu_ring_test_ring(kiq_ring);
3121 DRM_ERROR("KGQ enable failed\n");
3122 kiq_ring->sched.ready = false;
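/*
 * Flow of the helper above: reserve space for one MAP_QUEUES packet
 * per gfx ring, let the KIQ packet-manager callback emit each packet,
 * then run a ring test so the mapping is known to have completed
 * before the queues are used.
 */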
3128 static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
3131 struct amdgpu_ring *ring;
3133 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3134 ring = &adev->gfx.gfx_ring[i];
3136 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3137 if (unlikely(r != 0))
3140 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3142 r = gfx_v10_0_gfx_init_queue(ring);
3143 amdgpu_bo_kunmap(ring->mqd_obj);
3144 ring->mqd_ptr = NULL;
3146 amdgpu_bo_unreserve(ring->mqd_obj);
3150 #ifndef BRING_UP_DEBUG
3151 r = gfx_v10_0_kiq_enable_kgq(adev);
3155 r = gfx_v10_0_cp_gfx_start(adev);
3159 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3160 ring = &adev->gfx.gfx_ring[i];
3161 ring->sched.ready = true;
3167 static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
3169 struct amdgpu_device *adev = ring->adev;
3170 struct v10_compute_mqd *mqd = ring->mqd_ptr;
3171 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3174 mqd->header = 0xC0310800;
3175 mqd->compute_pipelinestat_enable = 0x00000001;
3176 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3177 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3178 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3179 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3180 mqd->compute_misc_reserved = 0x00000003;
3182 eop_base_addr = ring->eop_gpu_addr >> 8;
3183 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3184 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3186 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3187 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3188 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3189 (order_base_2(GFX10_MEC_HPD_SIZE / 4) - 1));
3191 mqd->cp_hqd_eop_control = tmp;
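/*
 * Arithmetic check: GFX10_MEC_HPD_SIZE is 2048 bytes, i.e. 512 dwords,
 * so order_base_2(512) - 1 = 8 lands in EOP_SIZE, and the register's
 * 2^(EOP_SIZE+1) = 512 dwords matches the allocation.
 */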
3193 /* enable doorbell? */
3194 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3196 if (ring->use_doorbell) {
3197 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3198 DOORBELL_OFFSET, ring->doorbell_index);
3199 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3201 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3202 DOORBELL_SOURCE, 0);
3203 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3206 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3210 mqd->cp_hqd_pq_doorbell_control = tmp;
3212 /* disable the queue if it's active */
3214 mqd->cp_hqd_dequeue_request = 0;
3215 mqd->cp_hqd_pq_rptr = 0;
3216 mqd->cp_hqd_pq_wptr_lo = 0;
3217 mqd->cp_hqd_pq_wptr_hi = 0;
3219 /* set the pointer to the MQD */
3220 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3221 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3223 /* set MQD vmid to 0 */
3224 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3225 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3226 mqd->cp_mqd_control = tmp;
3228 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3229 hqd_gpu_addr = ring->gpu_addr >> 8;
3230 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3231 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3233 /* set up the HQD, this is similar to CP_RB0_CNTL */
3234 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3235 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3236 (order_base_2(ring->ring_size / 4) - 1));
3237 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3238 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
3240 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3242 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3243 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
3244 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3245 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3246 mqd->cp_hqd_pq_control = tmp;
3248 /* set the wb address whether it's enabled or not */
3249 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3250 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3251 mqd->cp_hqd_pq_rptr_report_addr_hi =
3252 upper_32_bits(wb_gpu_addr) & 0xffff;
3254 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3255 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3256 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3257 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3260 /* enable the doorbell if requested */
3261 if (ring->use_doorbell) {
3262 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3263 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3264 DOORBELL_OFFSET, ring->doorbell_index);
3266 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3268 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3269 DOORBELL_SOURCE, 0);
3270 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3274 mqd->cp_hqd_pq_doorbell_control = tmp;
3276 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3278 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3280 /* set the vmid for the queue */
3281 mqd->cp_hqd_vmid = 0;
3283 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3284 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3285 mqd->cp_hqd_persistent_state = tmp;
3287 /* set MIN_IB_AVAIL_SIZE */
3288 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3289 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3290 mqd->cp_hqd_ib_control = tmp;
3292 /* activate the queue */
3293 mqd->cp_hqd_active = 1;
3298 static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
3300 struct amdgpu_device *adev = ring->adev;
3301 struct v10_compute_mqd *mqd = ring->mqd_ptr;
3304 /* disable wptr polling */
3305 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3307 /* write the EOP addr */
3308 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3309 mqd->cp_hqd_eop_base_addr_lo);
3310 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3311 mqd->cp_hqd_eop_base_addr_hi);
3313 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3314 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
3315 mqd->cp_hqd_eop_control);
3317 /* enable doorbell? */
3318 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3319 mqd->cp_hqd_pq_doorbell_control);
3321 /* disable the queue if it's active */
3322 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3323 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3324 for (j = 0; j < adev->usec_timeout; j++) {
3325 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3329 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3330 mqd->cp_hqd_dequeue_request);
3331 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
3332 mqd->cp_hqd_pq_rptr);
3333 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3334 mqd->cp_hqd_pq_wptr_lo);
3335 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3336 mqd->cp_hqd_pq_wptr_hi);
3339 /* set the pointer to the MQD */
3340 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
3341 mqd->cp_mqd_base_addr_lo);
3342 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3343 mqd->cp_mqd_base_addr_hi);
3345 /* set MQD vmid to 0 */
3346 WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
3347 mqd->cp_mqd_control);
3349 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3350 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
3351 mqd->cp_hqd_pq_base_lo);
3352 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
3353 mqd->cp_hqd_pq_base_hi);
3355 /* set up the HQD, this is similar to CP_RB0_CNTL */
3356 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
3357 mqd->cp_hqd_pq_control);
3359 /* set the wb address whether it's enabled or not */
3360 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3361 mqd->cp_hqd_pq_rptr_report_addr_lo);
3362 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3363 mqd->cp_hqd_pq_rptr_report_addr_hi);
3365 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3366 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3367 mqd->cp_hqd_pq_wptr_poll_addr_lo);
3368 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3369 mqd->cp_hqd_pq_wptr_poll_addr_hi);
3371 /* enable the doorbell if requested */
3372 if (ring->use_doorbell) {
3373 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3374 (adev->doorbell_index.kiq * 2) << 2);
3375 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3376 (adev->doorbell_index.userqueue_end * 2) << 2);
3379 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3380 mqd->cp_hqd_pq_doorbell_control);
3382 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3383 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3384 mqd->cp_hqd_pq_wptr_lo);
3385 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3386 mqd->cp_hqd_pq_wptr_hi);
3388 /* set the vmid for the queue */
3389 WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3391 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3392 mqd->cp_hqd_persistent_state);
3394 /* activate the queue */
3395 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
3396 mqd->cp_hqd_active);
3398 if (ring->use_doorbell)
3399 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3404 static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
3406 struct amdgpu_device *adev = ring->adev;
3407 struct v10_compute_mqd *mqd = ring->mqd_ptr;
3408 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3410 gfx_v10_0_kiq_setting(ring);
3412 if (adev->in_gpu_reset) { /* for GPU_RESET case */
3413 /* reset MQD to a clean status */
3414 if (adev->gfx.mec.mqd_backup[mqd_idx])
3415 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
3417 /* reset ring buffer */
3419 amdgpu_ring_clear_ring(ring);
3421 mutex_lock(&adev->srbm_mutex);
3422 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3423 gfx_v10_0_kiq_init_register(ring);
3424 nv_grbm_select(adev, 0, 0, 0, 0);
3425 mutex_unlock(&adev->srbm_mutex);
3427 memset((void *)mqd, 0, sizeof(*mqd));
3428 mutex_lock(&adev->srbm_mutex);
3429 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3430 gfx_v10_0_compute_mqd_init(ring);
3431 gfx_v10_0_kiq_init_register(ring);
3432 nv_grbm_select(adev, 0, 0, 0, 0);
3433 mutex_unlock(&adev->srbm_mutex);
3435 if (adev->gfx.mec.mqd_backup[mqd_idx])
3436 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3442 static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
3444 struct amdgpu_device *adev = ring->adev;
3445 struct v10_compute_mqd *mqd = ring->mqd_ptr;
3446 int mqd_idx = ring - &adev->gfx.compute_ring[0];
3448 if (!adev->in_gpu_reset && !adev->in_suspend) {
3449 memset((void *)mqd, 0, sizeof(*mqd));
3450 mutex_lock(&adev->srbm_mutex);
3451 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3452 gfx_v10_0_compute_mqd_init(ring);
3453 nv_grbm_select(adev, 0, 0, 0, 0);
3454 mutex_unlock(&adev->srbm_mutex);
3456 if (adev->gfx.mec.mqd_backup[mqd_idx])
3457 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3458 } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3459 /* reset MQD to a clean status */
3460 if (adev->gfx.mec.mqd_backup[mqd_idx])
3461 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
3463 /* reset ring buffer */
3465 amdgpu_ring_clear_ring(ring);
3467 amdgpu_ring_clear_ring(ring);
3473 static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
3475 struct amdgpu_ring *ring;
3478 ring = &adev->gfx.kiq.ring;
3480 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3481 if (unlikely(r != 0))
3484 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3485 if (unlikely(r != 0))
3488 gfx_v10_0_kiq_init_queue(ring);
3489 amdgpu_bo_kunmap(ring->mqd_obj);
3490 ring->mqd_ptr = NULL;
3491 amdgpu_bo_unreserve(ring->mqd_obj);
3492 ring->sched.ready = true;
3496 static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
3498 struct amdgpu_ring *ring = NULL;
3501 gfx_v10_0_cp_compute_enable(adev, true);
3503 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3504 ring = &adev->gfx.compute_ring[i];
3506 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3507 if (unlikely(r != 0))
3509 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3511 r = gfx_v10_0_kcq_init_queue(ring);
3512 amdgpu_bo_kunmap(ring->mqd_obj);
3513 ring->mqd_ptr = NULL;
3515 amdgpu_bo_unreserve(ring->mqd_obj);
3520 r = amdgpu_gfx_enable_kcq(adev);
3525 static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
3528 struct amdgpu_ring *ring;
3530 if (!(adev->flags & AMD_IS_APU))
3531 gfx_v10_0_enable_gui_idle_interrupt(adev, false);
3533 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
3534 /* legacy firmware loading */
3535 r = gfx_v10_0_cp_gfx_load_microcode(adev);
3539 r = gfx_v10_0_cp_compute_load_microcode(adev);
3544 r = gfx_v10_0_kiq_resume(adev);
3548 r = gfx_v10_0_kcq_resume(adev);
3552 if (!amdgpu_async_gfx_ring) {
3553 r = gfx_v10_0_cp_gfx_resume(adev);
3557 r = gfx_v10_0_cp_async_gfx_ring_resume(adev);
3562 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3563 ring = &adev->gfx.gfx_ring[i];
3564 DRM_INFO("gfx %d ring me %d pipe %d q %d\n",
3565 i, ring->me, ring->pipe, ring->queue);
3566 r = amdgpu_ring_test_ring(ring);
3568 ring->sched.ready = false;
3573 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3574 ring = &adev->gfx.compute_ring[i];
3575 ring->sched.ready = true;
3576 DRM_INFO("compute ring %d mec %d pipe %d q %d\n",
3577 i, ring->me, ring->pipe, ring->queue);
3578 r = amdgpu_ring_test_ring(ring);
3580 ring->sched.ready = false;
3586 static void gfx_v10_0_cp_enable(struct amdgpu_device *adev, bool enable)
3588 gfx_v10_0_cp_gfx_enable(adev, enable);
3589 gfx_v10_0_cp_compute_enable(adev, enable);
3592 static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
3594 uint32_t data, pattern = 0xDEADBEEF;
3596 /* check if mmVGT_ESGS_RING_SIZE_UMD
3597 * has been remapped to mmVGT_ESGS_RING_SIZE */
3598 data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE);
3600 WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, 0);
3602 WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);
3604 if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE) == pattern) {
3605 WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data);
3608 WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data);
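/*
 * The probe above is non-destructive: it saves the privileged
 * register's value, writes 0xDEADBEEF through the UMD alias, and
 * checks whether the pattern lands at the privileged offset; the saved
 * value is restored on both outcomes.
 */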
3613 static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
3617 /* initialize cam_index to 0;
3618 * the index auto-increments after each data write */
3619 WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0);
3621 /* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
3622 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
3623 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3624 (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE) <<
3625 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3626 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3627 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3629 /* mmVGT_TF_MEMORY_BASE_UMD -> mmVGT_TF_MEMORY_BASE */
3630 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_UMD) <<
3631 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3632 (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE) <<
3633 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3634 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3635 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3637 /* mmVGT_TF_MEMORY_BASE_HI_UMD -> mmVGT_TF_MEMORY_BASE_HI */
3638 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI_UMD) <<
3639 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3640 (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI) <<
3641 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3642 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3643 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3645 /* mmVGT_HS_OFFCHIP_PARAM_UMD -> mmVGT_HS_OFFCHIP_PARAM */
3646 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM_UMD) <<
3647 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3648 (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM) <<
3649 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3650 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3651 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3653 /* mmVGT_ESGS_RING_SIZE_UMD -> mmVGT_ESGS_RING_SIZE */
3654 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE_UMD) <<
3655 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3656 (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE) <<
3657 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3658 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3659 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3661 /* mmVGT_GSVS_RING_SIZE_UMD -> mmVGT_GSVS_RING_SIZE */
3662 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE_UMD) <<
3663 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3664 (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE) <<
3665 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3666 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3667 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3669 /* mmSPI_CONFIG_CNTL_REMAP -> mmSPI_CONFIG_CNTL */
3670 data = (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_REMAP) <<
3671 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3672 (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL) <<
3673 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3674 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3675 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3678 static int gfx_v10_0_hw_init(void *handle)
3681 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3683 r = gfx_v10_0_csb_vram_pin(adev);
3687 if (!amdgpu_emu_mode)
3688 gfx_v10_0_init_golden_registers(adev);
3690 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
3692 * For gfx 10, rlc firmware loading relies on the smu firmware being
3693 * loaded first, so in direct type, it has to load smc ucode
3696 r = smu_load_microcode(&adev->smu);
3700 r = smu_check_fw_status(&adev->smu);
3702 pr_err("SMC firmware status is not correct\n");
3707 /* if the GRBM CAM is not remapped yet, set up the remapping */
3708 if (!gfx_v10_0_check_grbm_cam_remapping(adev))
3709 gfx_v10_0_setup_grbm_cam_remapping(adev);
3711 gfx_v10_0_constants_init(adev);
3713 r = gfx_v10_0_rlc_resume(adev);
3718 * init golden registers and rlc resume may override some registers;
3719 * reconfigure them here
3721 gfx_v10_0_tcp_harvest(adev);
3723 r = gfx_v10_0_cp_resume(adev);
3730 #ifndef BRING_UP_DEBUG
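/*
 * Tear-down path for the kernel gfx queues (KGQ) via the KIQ: reserve
 * space for one unmap packet per gfx ring, ask the KIQ packet-manager
 * funcs to unmap each queue with PREEMPT_QUEUES, then run a KIQ ring
 * test to make sure the requests drained.
 */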
3731 static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
3733 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
3734 struct amdgpu_ring *kiq_ring = &kiq->ring;
3737 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
3740 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
3741 adev->gfx.num_gfx_rings))
3744 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3745 kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
3746 PREEMPT_QUEUES, 0, 0);
3748 return amdgpu_ring_test_ring(kiq_ring);
3752 static int gfx_v10_0_hw_fini(void *handle)
3754 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3757 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3758 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3759 #ifndef BRING_UP_DEBUG
3760 if (amdgpu_async_gfx_ring) {
3761 r = gfx_v10_0_kiq_disable_kgq(adev);
3763 DRM_ERROR("KGQ disable failed\n");
3766 if (amdgpu_gfx_disable_kcq(adev))
3767 DRM_ERROR("KCQ disable failed\n");
3768 if (amdgpu_sriov_vf(adev)) {
3769 pr_debug("Nothing to do for SRIOV client\n");
3772 gfx_v10_0_cp_enable(adev, false);
3773 gfx_v10_0_enable_gui_idle_interrupt(adev, false);
3774 gfx_v10_0_csb_vram_unpin(adev);
3779 static int gfx_v10_0_suspend(void *handle)
3781 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3783 adev->in_suspend = true;
3784 return gfx_v10_0_hw_fini(adev);
3787 static int gfx_v10_0_resume(void *handle)
3789 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3792 r = gfx_v10_0_hw_init(adev);
3793 adev->in_suspend = false;
3797 static bool gfx_v10_0_is_idle(void *handle)
3799 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3801 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3802 GRBM_STATUS, GUI_ACTIVE))
3808 static int gfx_v10_0_wait_for_idle(void *handle)
3812 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3814 for (i = 0; i < adev->usec_timeout; i++) {
3815 /* read GRBM_STATUS */
3816 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
3817 GRBM_STATUS__GUI_ACTIVE_MASK;
3819 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
3826 static int gfx_v10_0_soft_reset(void *handle)
3828 u32 grbm_soft_reset = 0;
3830 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3833 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3834 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3835 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3836 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK |
3837 GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK |
3838 GRBM_STATUS__SPI_BUSY_MASK |
3839 GRBM_STATUS__GE_BUSY_NO_DMA_MASK)) {
3840 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3841 GRBM_SOFT_RESET, SOFT_RESET_CP,
3843 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3844 GRBM_SOFT_RESET, SOFT_RESET_GFX,
3848 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3849 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3850 GRBM_SOFT_RESET, SOFT_RESET_CP,
3855 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3856 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3857 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3858 GRBM_SOFT_RESET, SOFT_RESET_RLC,
3861 if (grbm_soft_reset) {
3863 gfx_v10_0_rlc_stop(adev);
3865 /* Disable GFX parsing/prefetching */
3866 gfx_v10_0_cp_gfx_enable(adev, false);
3868 /* Disable MEC parsing/prefetching */
3869 gfx_v10_0_cp_compute_enable(adev, false);
3871 if (grbm_soft_reset) {
3872 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3873 tmp |= grbm_soft_reset;
3874 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3875 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3876 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3880 tmp &= ~grbm_soft_reset;
3881 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3882 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3885 /* Wait a little for things to settle down */
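/*
 * The capture write below (RLC_CAPTURE_GPU_CLOCK_COUNT = 1) latches the
 * free-running counter so the following LSB/MSB reads form a coherent
 * 64-bit value; gpu_clock_mutex serializes concurrent captures.
 */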
3891 static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3895 mutex_lock(&adev->gfx.gpu_clock_mutex);
3896 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3897 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3898 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3899 mutex_unlock(&adev->gfx.gpu_clock_mutex);
3903 static void gfx_v10_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3905 uint32_t gds_base, uint32_t gds_size,
3906 uint32_t gws_base, uint32_t gws_size,
3907 uint32_t oa_base, uint32_t oa_size)
3909 struct amdgpu_device *adev = ring->adev;
3912 gfx_v10_0_write_data_to_reg(ring, 0, false,
3913 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
3917 gfx_v10_0_write_data_to_reg(ring, 0, false,
3918 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
3922 gfx_v10_0_write_data_to_reg(ring, 0, false,
3923 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
3924 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
3927 gfx_v10_0_write_data_to_reg(ring, 0, false,
3928 SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
3929 (1 << (oa_size + oa_base)) - (1 << oa_base));
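/*
 * OA mask arithmetic used above: (1 << (oa_size + oa_base)) -
 * (1 << oa_base) yields a contiguous mask of oa_size bits starting at
 * bit oa_base, e.g. oa_base = 2, oa_size = 3 -> (1 << 5) - (1 << 2) =
 * 0b0011100.
 */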
3932 static int gfx_v10_0_early_init(void *handle)
3934 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3936 adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS;
3937 adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
3939 gfx_v10_0_set_kiq_pm4_funcs(adev);
3940 gfx_v10_0_set_ring_funcs(adev);
3941 gfx_v10_0_set_irq_funcs(adev);
3942 gfx_v10_0_set_gds_init(adev);
3943 gfx_v10_0_set_rlc_funcs(adev);
3948 static int gfx_v10_0_late_init(void *handle)
3950 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3953 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
3957 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
3964 static bool gfx_v10_0_is_rlc_enabled(struct amdgpu_device *adev)
3968 /* report whether the RLC is currently enabled */
3969 rlc_cntl = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3970 return !!REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32);
3973 static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
3978 data = RLC_SAFE_MODE__CMD_MASK;
3979 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
3980 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3982 /* wait for RLC_SAFE_MODE */
3983 for (i = 0; i < adev->usec_timeout; i++) {
3984 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
3990 static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev)
3994 data = RLC_SAFE_MODE__CMD_MASK;
3995 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
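/*
 * Safe-mode handshake: entry writes CMD with MESSAGE=1 to RLC_SAFE_MODE
 * and polls until the RLC clears the CMD field (request consumed); exit
 * writes CMD with MESSAGE=0 and does not wait.
 */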
3998 static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4003 /* It is disabled by HW by default */
4004 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4005 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4006 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4007 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4008 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4009 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4011 /* leftover note from gfx9: only needed for Vega10 & Raven1 there */
4012 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4015 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4017 /* MGLS is a global flag to control all MGLS in GFX */
4018 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4019 /* 2 - RLC memory Light sleep */
4020 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4021 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4022 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4024 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4026 /* 3 - CP memory Light sleep */
4027 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4028 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4029 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4031 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4035 /* 1 - MGCG_OVERRIDE */
4036 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4037 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4038 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4039 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4040 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4042 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4044 /* 2 - disable MGLS in RLC */
4045 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4046 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4047 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4048 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4051 /* 3 - disable MGLS in CP */
4052 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4053 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4054 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4055 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4060 static void gfx_v10_0_update_3d_clock_gating(struct amdgpu_device *adev,
4065 /* Enable 3D CGCG/CGLS */
4066 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
4067 /* write cmd to clear cgcg/cgls ov */
4068 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4069 /* unset CGCG override */
4070 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4071 /* update CGCG and CGLS override bits */
4073 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4074 /* enable 3Dcgcg FSM(0x0000363f) */
4075 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4076 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4077 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4078 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4079 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4080 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4082 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4084 /* set IDLE_POLL_COUNT(0x00900100) */
4085 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4086 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4087 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4089 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4091 /* Disable CGCG/CGLS */
4092 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4093 /* disable cgcg, cgls should be disabled */
4094 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
4095 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
4096 /* disable cgcg and cgls in FSM */
4098 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4102 static void gfx_v10_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4107 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
4108 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4109 /* unset CGCG override */
4110 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4111 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4112 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4114 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4115 /* update CGCG and CGLS override bits */
4117 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4119 /* enable cgcg FSM(0x0000363F) */
4120 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4121 data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4122 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4123 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4124 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4125 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4127 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4129 /* set IDLE_POLL_COUNT(0x00900100) */
4130 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4131 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4132 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4134 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4136 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4137 /* reset CGCG/CGLS bits */
4138 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4139 /* disable cgcg and cgls in FSM */
4141 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4145 static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4148 amdgpu_gfx_rlc_enter_safe_mode(adev);
4151 /* CGCG/CGLS should be enabled after MGCG/MGLS
4152 * === MGCG + MGLS ===
4154 gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
4155 /* === CGCG/CGLS for GFX 3D Only === */
4156 gfx_v10_0_update_3d_clock_gating(adev, enable);
4157 /* === CGCG + CGLS === */
4158 gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
4160 /* CGCG/CGLS should be disabled before MGCG/MGLS
4161 * === CGCG + CGLS ===
4163 gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
4165 /* === CGCG/CGLS for GFX 3D Only === */
4165 gfx_v10_0_update_3d_clock_gating(adev, enable);
4166 /* === MGCG + MGLS === */
4167 gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
4170 if (adev->cg_flags &
4171 (AMD_CG_SUPPORT_GFX_MGCG |
4172 AMD_CG_SUPPORT_GFX_CGCG |
4173 AMD_CG_SUPPORT_GFX_CGLS |
4175 AMD_CG_SUPPORT_GFX_3D_CGCG |
4176 AMD_CG_SUPPORT_GFX_3D_CGLS))
4177 gfx_v10_0_enable_gui_idle_interrupt(adev, enable);
4179 amdgpu_gfx_rlc_exit_safe_mode(adev);
4184 static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
4185 .is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
4186 .set_safe_mode = gfx_v10_0_set_safe_mode,
4187 .unset_safe_mode = gfx_v10_0_unset_safe_mode,
4188 .init = gfx_v10_0_rlc_init,
4189 .get_csb_size = gfx_v10_0_get_csb_size,
4190 .get_csb_buffer = gfx_v10_0_get_csb_buffer,
4191 .resume = gfx_v10_0_rlc_resume,
4192 .stop = gfx_v10_0_rlc_stop,
4193 .reset = gfx_v10_0_rlc_reset,
4194 .start = gfx_v10_0_rlc_start
4197 static int gfx_v10_0_set_powergating_state(void *handle,
4198 enum amd_powergating_state state)
4200 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4201 bool enable = (state == AMD_PG_STATE_GATE);
4202 switch (adev->asic_type) {
4206 amdgpu_gfx_off_ctrl(adev, false);
4207 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
4209 amdgpu_gfx_off_ctrl(adev, true);
4217 static int gfx_v10_0_set_clockgating_state(void *handle,
4218 enum amd_clockgating_state state)
4220 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4222 switch (adev->asic_type) {
4226 gfx_v10_0_update_gfx_clock_gating(adev,
4227 state == AMD_CG_STATE_GATE);
4235 static void gfx_v10_0_get_clockgating_state(void *handle, u32 *flags)
4237 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4240 /* AMD_CG_SUPPORT_GFX_MGCG */
4241 data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4242 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
4243 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
4245 /* AMD_CG_SUPPORT_GFX_CGCG */
4246 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4247 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
4248 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
4250 /* AMD_CG_SUPPORT_GFX_CGLS */
4251 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
4252 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
4254 /* AMD_CG_SUPPORT_GFX_RLC_LS */
4255 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4256 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
4257 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
4259 /* AMD_CG_SUPPORT_GFX_CP_LS */
4260 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4261 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
4262 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
4264 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
4265 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4266 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
4267 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
4269 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
4270 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
4271 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
4274 static u64 gfx_v10_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
4276 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 is 32bit rptr */
4279 static u64 gfx_v10_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
4281 struct amdgpu_device *adev = ring->adev;
4284 /* XXX check if swapping is necessary on BE */
4285 if (ring->use_doorbell) {
4286 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
4288 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
4289 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
4295 static void gfx_v10_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
4297 struct amdgpu_device *adev = ring->adev;
4299 if (ring->use_doorbell) {
4300 /* XXX check if swapping is necessary on BE */
4301 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4302 WDOORBELL64(ring->doorbell_index, ring->wptr);
4304 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
4305 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
4309 static u64 gfx_v10_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
4311 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 hardware is 32bit rptr */
4314 static u64 gfx_v10_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
4318 /* XXX check if swapping is necessary on BE */
4319 if (ring->use_doorbell)
4320 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
4326 static void gfx_v10_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
4328 struct amdgpu_device *adev = ring->adev;
4330 /* XXX check if swapping is necessary on BE */
4331 if (ring->use_doorbell) {
4332 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4333 WDOORBELL64(ring->doorbell_index, ring->wptr);
4335 BUG(); /* only the DOORBELL method is supported on gfx10 now */
4339 static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
4341 struct amdgpu_device *adev = ring->adev;
4342 u32 ref_and_mask, reg_mem_engine;
4343 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
4345 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
4348 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
4351 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
4358 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
4359 reg_mem_engine = 1; /* pfp */
4362 gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
4363 adev->nbio_funcs->get_hdp_flush_req_offset(adev),
4364 adev->nbio_funcs->get_hdp_flush_done_offset(adev),
4365 ref_and_mask, ref_and_mask, 0x20);
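/*
 * HDP flush handshake (a summary of the call above): wait_reg_mem is
 * invoked in its write-then-wait mode, which writes ref_and_mask to the
 * nbio HDP flush "req" register and then polls the "done" register for
 * the same bits. ref_and_mask is shifted by ring->pipe so each compute
 * pipe (and the CP gfx engine) owns a distinct request/done bit.
 */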
4368 static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
4369 struct amdgpu_job *job,
4370 struct amdgpu_ib *ib,
4373 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4374 u32 header, control = 0;
4376 /* Prevent a hw deadlock due to a wave ID mismatch between ME and GDS.
4377 * This resets the wave ID counters. (needed by transform feedback)
4378 * TODO: This might only be needed on a VMID switch when we change
4379 * the GDS OA mapping, not sure.
4381 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
4382 amdgpu_ring_write(ring, mmVGT_GS_MAX_WAVE_ID);
4383 amdgpu_ring_write(ring, ring->adev->gds.vgt_gs_max_wave_id);
4385 if (ib->flags & AMDGPU_IB_FLAG_CE)
4386 header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2);
4388 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
4390 control |= ib->length_dw | (vmid << 24);
4392 if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
4393 control |= INDIRECT_BUFFER_PRE_ENB(1);
4395 if (flags & AMDGPU_IB_PREEMPTED)
4396 control |= INDIRECT_BUFFER_PRE_RESUME(1);
4398 if (!(ib->flags & AMDGPU_IB_FLAG_CE))
4399 gfx_v10_0_ring_emit_de_meta(ring,
4400 !!(flags & AMDGPU_IB_PREEMPTED));
4403 amdgpu_ring_write(ring, header);
4404 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4405 amdgpu_ring_write(ring,
4409 lower_32_bits(ib->gpu_addr));
4410 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4411 amdgpu_ring_write(ring, control);
4414 static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
4415 struct amdgpu_job *job,
4416 struct amdgpu_ib *ib,
4419 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4420 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
4422 /* Currently, there is a high likelihood of getting a wave ID mismatch
4423 * between ME and GDS, leading to a hw deadlock, because ME generates
4424 * different wave IDs than the GDS expects. This situation happens
4425 * randomly when at least 5 compute pipes use GDS ordered append.
4426 * The wave IDs generated by ME are also wrong after suspend/resume.
4427 * Those are probably bugs somewhere else in the kernel driver.
4429 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
4430 * GDS to 0 for this ring (me/pipe).
4432 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
4433 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
4434 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
4435 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
4438 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
4439 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4440 amdgpu_ring_write(ring,
4444 lower_32_bits(ib->gpu_addr));
4445 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4446 amdgpu_ring_write(ring, control);
4449 static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
4450 u64 seq, unsigned flags)
4452 struct amdgpu_device *adev = ring->adev;
4453 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
4454 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
4456 /* Interrupts don't work correctly on the GFX10.1 model yet; use the fallback instead */
4457 if (adev->pdev->device == 0x50)
4460 /* RELEASE_MEM - flush caches, send int */
4461 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
4462 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
4463 PACKET3_RELEASE_MEM_GCR_GL2_WB |
4464 PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */
4465 PACKET3_RELEASE_MEM_GCR_GLM_WB |
4466 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
4467 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
4468 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
4469 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
4470 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
4473 * the address should be Qword aligned for a 64bit write, Dword
4474 * aligned if only the low 32bit data is sent (data high discarded)
4480 amdgpu_ring_write(ring, lower_32_bits(addr));
4481 amdgpu_ring_write(ring, upper_32_bits(addr));
4482 amdgpu_ring_write(ring, lower_32_bits(seq));
4483 amdgpu_ring_write(ring, upper_32_bits(seq));
4484 amdgpu_ring_write(ring, 0);
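/*
 * Fence packet layout (as emitted above): RELEASE_MEM after a
 * CACHE_FLUSH_AND_INV_TS event with GL2/GLM writeback+invalidate, then
 * the seq value is written to addr. DATA_SEL 2 selects a 64-bit write
 * (hence the Qword alignment note), 1 a 32-bit low write; INT_SEL 2
 * additionally raises an interrupt once the write is confirmed.
 */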
4487 static void gfx_v10_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
4489 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4490 uint32_t seq = ring->fence_drv.sync_seq;
4491 uint64_t addr = ring->fence_drv.gpu_addr;
4493 gfx_v10_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
4494 upper_32_bits(addr), seq, 0xffffffff, 4);
4497 static void gfx_v10_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4498 unsigned vmid, uint64_t pd_addr)
4500 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
4502 /* compute doesn't have PFP */
4503 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
4504 /* sync PFP to ME, otherwise we might get invalid PFP reads */
4505 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4506 amdgpu_ring_write(ring, 0x0);
4510 static void gfx_v10_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
4511 u64 seq, unsigned int flags)
4513 struct amdgpu_device *adev = ring->adev;
4515 /* we only allocate 32bit for each seq wb address */
4516 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
4518 /* write fence seq to the "addr" */
4519 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4520 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4521 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
4522 amdgpu_ring_write(ring, lower_32_bits(addr));
4523 amdgpu_ring_write(ring, upper_32_bits(addr));
4524 amdgpu_ring_write(ring, lower_32_bits(seq));
4526 if (flags & AMDGPU_FENCE_FLAG_INT) {
4527 /* set register to trigger INT */
4528 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4529 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4530 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
4531 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
4532 amdgpu_ring_write(ring, 0);
4533 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
4537 static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring)
4539 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4540 amdgpu_ring_write(ring, 0);
4543 static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
4548 gfx_v10_0_ring_emit_ce_meta(ring,
4549 !!(flags & AMDGPU_IB_PREEMPTED));
4551 gfx_v10_0_ring_emit_tmz(ring, true);
4553 dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
4554 if (flags & AMDGPU_HAVE_CTX_SWITCH) {
4555 /* set load_global_config & load_global_uconfig */
4557 /* set load_cs_sh_regs */
4559 /* set load_per_context_state & load_gfx_sh_regs for GFX */
4562 /* set load_ce_ram if preamble is present */
4563 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
4566 /* still load_ce_ram if this is the first time the preamble is presented,
4567 * even though no context switch happens.
4569 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
4573 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4574 amdgpu_ring_write(ring, dw2);
4575 amdgpu_ring_write(ring, 0);
4578 static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
4582 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
4583 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
4584 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
4585 amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
4586 ret = ring->wptr & ring->buf_mask;
4587 amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
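/*
 * init_cond_exec above emits COND_EXEC with a 0x55aa55aa placeholder in
 * the "number of DWs to skip" operand and returns its ring offset;
 * patch_cond_exec below back-fills that operand once the frame is fully
 * emitted, handling ring-buffer wraparound when wptr has wrapped past
 * the patch location.
 */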
4592 static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
4595 BUG_ON(offset > ring->buf_mask);
4596 BUG_ON(ring->ring[offset] != 0x55aa55aa);
4598 cur = (ring->wptr - 1) & ring->buf_mask;
4599 if (likely(cur > offset))
4600 ring->ring[offset] = cur - offset;
4602 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
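/*
 * Mid-frame preemption flow: clear the cond_exec flag so the CP skips
 * the not-yet-started frame, have the KIQ unmap the queue with
 * PREEMPT_QUEUES_NO_UNMAP plus a trailing fence, busy-poll that fence
 * up to usec_timeout, then set the cond_exec flag back.
 */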
4605 static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
4608 struct amdgpu_device *adev = ring->adev;
4609 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4610 struct amdgpu_ring *kiq_ring = &kiq->ring;
4612 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
4615 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size))
4618 /* assert preemption condition */
4619 amdgpu_ring_set_preempt_cond_exec(ring, false);
4621 /* assert IB preemption, emit the trailing fence */
4622 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
4623 ring->trail_fence_gpu_addr,
4625 amdgpu_ring_commit(kiq_ring);
4627 /* poll the trailing fence */
4628 for (i = 0; i < adev->usec_timeout; i++) {
4629 if (ring->trail_seq ==
4630 le32_to_cpu(*(ring->trail_fence_cpu_addr)))
4635 if (i >= adev->usec_timeout) {
4637 DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
4640 /* deassert preemption condition */
4641 amdgpu_ring_set_preempt_cond_exec(ring, true);
4645 static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
4647 struct amdgpu_device *adev = ring->adev;
4648 struct v10_ce_ib_state ce_payload = {0};
4652 cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
4653 csa_addr = amdgpu_csa_vaddr(ring->adev);
4655 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4656 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
4657 WRITE_DATA_DST_SEL(8) |
4659 WRITE_DATA_CACHE_POLICY(0));
4660 amdgpu_ring_write(ring, lower_32_bits(csa_addr +
4661 offsetof(struct v10_gfx_meta_data, ce_payload)));
4662 amdgpu_ring_write(ring, upper_32_bits(csa_addr +
4663 offsetof(struct v10_gfx_meta_data, ce_payload)));
4666 amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr +
4667 offsetof(struct v10_gfx_meta_data,
4669 sizeof(ce_payload) >> 2);
4671 amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
4672 sizeof(ce_payload) >> 2);
4675 static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
4677 struct amdgpu_device *adev = ring->adev;
4678 struct v10_de_ib_state de_payload = {0};
4679 uint64_t csa_addr, gds_addr;
4682 csa_addr = amdgpu_csa_vaddr(ring->adev);
4683 gds_addr = ALIGN(csa_addr + AMDGPU_CSA_SIZE - adev->gds.gds_size,
4685 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
4686 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
4688 cnt = (sizeof(de_payload) >> 2) + 4 - 2;
4689 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4690 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
4691 WRITE_DATA_DST_SEL(8) |
4693 WRITE_DATA_CACHE_POLICY(0));
4694 amdgpu_ring_write(ring, lower_32_bits(csa_addr +
4695 offsetof(struct v10_gfx_meta_data, de_payload)));
4696 amdgpu_ring_write(ring, upper_32_bits(csa_addr +
4697 offsetof(struct v10_gfx_meta_data, de_payload)));
4700 amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr +
4701 offsetof(struct v10_gfx_meta_data,
4703 sizeof(de_payload) >> 2);
4705 amdgpu_ring_write_multiple(ring, (void *)&de_payload,
4706 sizeof(de_payload) >> 2);
4709 static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
4711 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
4712 amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* 0: frame begin, 1: frame end */
4715 static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
4717 struct amdgpu_device *adev = ring->adev;
4719 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4720 amdgpu_ring_write(ring, 0 | /* src: register*/
4721 (5 << 8) | /* dst: memory */
4722 (1 << 20)); /* write confirm */
4723 amdgpu_ring_write(ring, reg);
4724 amdgpu_ring_write(ring, 0);
4725 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4726 adev->virt.reg_val_offs * 4));
4727 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4728 adev->virt.reg_val_offs * 4));
4731 static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
4736 switch (ring->funcs->type) {
4737 case AMDGPU_RING_TYPE_GFX:
4738 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
4740 case AMDGPU_RING_TYPE_KIQ:
4741 cmd = (1 << 16); /* no inc addr */
4747 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4748 amdgpu_ring_write(ring, cmd);
4749 amdgpu_ring_write(ring, reg);
4750 amdgpu_ring_write(ring, 0);
4751 amdgpu_ring_write(ring, val);
4754 static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
4755 uint32_t val, uint32_t mask)
4757 gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
4761 gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4762 uint32_t me, uint32_t pipe,
4763 enum amdgpu_interrupt_state state)
4765 uint32_t cp_int_cntl, cp_int_cntl_reg;
4770 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0);
4773 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING1);
4776 DRM_DEBUG("invalid pipe %d\n", pipe);
4780 DRM_DEBUG("invalid me %d\n", me);
4785 case AMDGPU_IRQ_STATE_DISABLE:
4786 cp_int_cntl = RREG32(cp_int_cntl_reg);
4787 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4788 TIME_STAMP_INT_ENABLE, 0);
4789 WREG32(cp_int_cntl_reg, cp_int_cntl);
4791 case AMDGPU_IRQ_STATE_ENABLE:
4792 cp_int_cntl = RREG32(cp_int_cntl_reg);
4793 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4794 TIME_STAMP_INT_ENABLE, 1);
4795 WREG32(cp_int_cntl_reg, cp_int_cntl);
4802 static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4804 enum amdgpu_interrupt_state state)
4806 u32 mec_int_cntl, mec_int_cntl_reg;
4809 * amdgpu controls only the first MEC. That's why this function only
4810 * handles the setting of interrupts for this specific MEC. All other
4811 * pipes' interrupts are set by amdkfd.
4817 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
4820 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
4823 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
4826 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
4829 DRM_DEBUG("invalid pipe %d\n", pipe);
4833 DRM_DEBUG("invalid me %d\n", me);
4838 case AMDGPU_IRQ_STATE_DISABLE:
4839 mec_int_cntl = RREG32(mec_int_cntl_reg);
4840 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4841 TIME_STAMP_INT_ENABLE, 0);
4842 WREG32(mec_int_cntl_reg, mec_int_cntl);
4844 case AMDGPU_IRQ_STATE_ENABLE:
4845 mec_int_cntl = RREG32(mec_int_cntl_reg);
4846 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4847 TIME_STAMP_INT_ENABLE, 1);
4848 WREG32(mec_int_cntl_reg, mec_int_cntl);
4855 static int gfx_v10_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4856 struct amdgpu_irq_src *src,
4858 enum amdgpu_interrupt_state state)
4861 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
4862 gfx_v10_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
4864 case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
4865 gfx_v10_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
4867 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4868 gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4870 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4871 gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4873 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4874 gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4876 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4877 gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4879 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
4880 gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4882 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
4883 gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4885 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
4886 gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4888 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
4889 gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
4897 static int gfx_v10_0_eop_irq(struct amdgpu_device *adev,
4898 struct amdgpu_irq_src *source,
4899 struct amdgpu_iv_entry *entry)
4902 u8 me_id, pipe_id, queue_id;
4903 struct amdgpu_ring *ring;
4905 DRM_DEBUG("IH: CP EOP\n");
4906 me_id = (entry->ring_id & 0x0c) >> 2;
4907 pipe_id = (entry->ring_id & 0x03) >> 0;
4908 queue_id = (entry->ring_id & 0x70) >> 4;
4913 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4915 amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
4919 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4920 ring = &adev->gfx.compute_ring[i];
4921 /* Per-queue interrupt is supported for MEC starting from VI.
4922 * The interrupt can only be enabled/disabled per pipe instead of per queue.
4924 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
4925 amdgpu_fence_process(ring);
4932 static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4933 struct amdgpu_irq_src *source,
4935 enum amdgpu_interrupt_state state)
4938 case AMDGPU_IRQ_STATE_DISABLE:
4939 case AMDGPU_IRQ_STATE_ENABLE:
4940 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4941 PRIV_REG_INT_ENABLE,
4942 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4951 static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4952 struct amdgpu_irq_src *source,
4954 enum amdgpu_interrupt_state state)
4957 case AMDGPU_IRQ_STATE_DISABLE:
4958 case AMDGPU_IRQ_STATE_ENABLE:
4959 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4960 PRIV_INSTR_INT_ENABLE,
4961 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4969 static void gfx_v10_0_handle_priv_fault(struct amdgpu_device *adev,
4970 struct amdgpu_iv_entry *entry)
4972 u8 me_id, pipe_id, queue_id;
4973 struct amdgpu_ring *ring;
4976 me_id = (entry->ring_id & 0x0c) >> 2;
4977 pipe_id = (entry->ring_id & 0x03) >> 0;
4978 queue_id = (entry->ring_id & 0x70) >> 4;
4982 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4983 ring = &adev->gfx.gfx_ring[i];
4984 /* we only enable 1 gfx queue per pipe for now */
4985 if (ring->me == me_id && ring->pipe == pipe_id)
4986 drm_sched_fault(&ring->sched);
4991 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4992 ring = &adev->gfx.compute_ring[i];
4993 if (ring->me == me_id && ring->pipe == pipe_id &&
4994 ring->queue == queue_id)
4995 drm_sched_fault(&ring->sched);
5003 static int gfx_v10_0_priv_reg_irq(struct amdgpu_device *adev,
5004 struct amdgpu_irq_src *source,
5005 struct amdgpu_iv_entry *entry)
5007 DRM_ERROR("Illegal register access in command stream\n");
5008 gfx_v10_0_handle_priv_fault(adev, entry);
5012 static int gfx_v10_0_priv_inst_irq(struct amdgpu_device *adev,
5013 struct amdgpu_irq_src *source,
5014 struct amdgpu_iv_entry *entry)
5016 DRM_ERROR("Illegal instruction in command stream\n");
5017 gfx_v10_0_handle_priv_fault(adev, entry);
5021 static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
5022 struct amdgpu_irq_src *src,
5024 enum amdgpu_interrupt_state state)
5026 uint32_t tmp, target;
5027 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
5030 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5032 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
5033 target += ring->pipe;
5036 case AMDGPU_CP_KIQ_IRQ_DRIVER0:
5037 if (state == AMDGPU_IRQ_STATE_DISABLE) {
5038 tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
5039 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
5040 GENERIC2_INT_ENABLE, 0);
5041 WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
5043 tmp = RREG32(target);
5044 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
5045 GENERIC2_INT_ENABLE, 0);
5046 WREG32(target, tmp);
5048 tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
5049 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
5050 GENERIC2_INT_ENABLE, 1);
5051 WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
5053 tmp = RREG32(target);
5054 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
5055 GENERIC2_INT_ENABLE, 1);
5056 WREG32(target, tmp);
5060 BUG(); /* KIQ only supports GENERIC2_INT now */
5066 static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev,
5067 struct amdgpu_irq_src *source,
5068 struct amdgpu_iv_entry *entry)
5070 u8 me_id, pipe_id, queue_id;
5071 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
5073 me_id = (entry->ring_id & 0x0c) >> 2;
5074 pipe_id = (entry->ring_id & 0x03) >> 0;
5075 queue_id = (entry->ring_id & 0x70) >> 4;
5076 DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
5077 me_id, pipe_id, queue_id);
5079 amdgpu_fence_process(ring);
5083 static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
5084 .name = "gfx_v10_0",
5085 .early_init = gfx_v10_0_early_init,
5086 .late_init = gfx_v10_0_late_init,
5087 .sw_init = gfx_v10_0_sw_init,
5088 .sw_fini = gfx_v10_0_sw_fini,
5089 .hw_init = gfx_v10_0_hw_init,
5090 .hw_fini = gfx_v10_0_hw_fini,
5091 .suspend = gfx_v10_0_suspend,
5092 .resume = gfx_v10_0_resume,
5093 .is_idle = gfx_v10_0_is_idle,
5094 .wait_for_idle = gfx_v10_0_wait_for_idle,
5095 .soft_reset = gfx_v10_0_soft_reset,
5096 .set_clockgating_state = gfx_v10_0_set_clockgating_state,
5097 .set_powergating_state = gfx_v10_0_set_powergating_state,
5098 .get_clockgating_state = gfx_v10_0_get_clockgating_state,
5101 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
5102 .type = AMDGPU_RING_TYPE_GFX,
5104 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
5105 .support_64bit_ptrs = true,
5106 .vmhub = AMDGPU_GFXHUB_0,
5107 .get_rptr = gfx_v10_0_ring_get_rptr_gfx,
5108 .get_wptr = gfx_v10_0_ring_get_wptr_gfx,
5109 .set_wptr = gfx_v10_0_ring_set_wptr_gfx,
5110 .emit_frame_size = /* 242 DWs maximum in total if 16 IBs */
5112 7 + /* PIPELINE_SYNC */
5113 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5114 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5116 8 + /* FENCE for VM_FLUSH */
5117 20 + /* GDS switch */
5118 4 + /* double SWITCH_BUFFER,
5119 * the first COND_EXEC jump to the place
5120 * just prior to this double SWITCH_BUFFER
5129 8 + 8 + /* FENCE x2 */
5130 2, /* SWITCH_BUFFER */
5131 .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_gfx */
5132 .emit_ib = gfx_v10_0_ring_emit_ib_gfx,
5133 .emit_fence = gfx_v10_0_ring_emit_fence,
5134 .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
5135 .emit_vm_flush = gfx_v10_0_ring_emit_vm_flush,
5136 .emit_gds_switch = gfx_v10_0_ring_emit_gds_switch,
5137 .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
5138 .test_ring = gfx_v10_0_ring_test_ring,
5139 .test_ib = gfx_v10_0_ring_test_ib,
5140 .insert_nop = amdgpu_ring_insert_nop,
5141 .pad_ib = amdgpu_ring_generic_pad_ib,
5142 .emit_switch_buffer = gfx_v10_0_ring_emit_sb,
5143 .emit_cntxcntl = gfx_v10_0_ring_emit_cntxcntl,
5144 .init_cond_exec = gfx_v10_0_ring_emit_init_cond_exec,
5145 .patch_cond_exec = gfx_v10_0_ring_emit_patch_cond_exec,
5146 .preempt_ib = gfx_v10_0_ring_preempt_ib,
5147 .emit_tmz = gfx_v10_0_ring_emit_tmz,
5148 .emit_wreg = gfx_v10_0_ring_emit_wreg,
5149 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5152 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
5153 .type = AMDGPU_RING_TYPE_COMPUTE,
5155 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
5156 .support_64bit_ptrs = true,
5157 .vmhub = AMDGPU_GFXHUB_0,
5158 .get_rptr = gfx_v10_0_ring_get_rptr_compute,
5159 .get_wptr = gfx_v10_0_ring_get_wptr_compute,
5160 .set_wptr = gfx_v10_0_ring_set_wptr_compute,
5162 20 + /* gfx_v10_0_ring_emit_gds_switch */
5163 7 + /* gfx_v10_0_ring_emit_hdp_flush */
5164 5 + /* hdp invalidate */
5165 7 + /* gfx_v10_0_ring_emit_pipeline_sync */
5166 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5167 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5168 2 + /* gfx_v10_0_ring_emit_vm_flush */
5169 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
5170 .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
5171 .emit_ib = gfx_v10_0_ring_emit_ib_compute,
5172 .emit_fence = gfx_v10_0_ring_emit_fence,
5173 .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
5174 .emit_vm_flush = gfx_v10_0_ring_emit_vm_flush,
5175 .emit_gds_switch = gfx_v10_0_ring_emit_gds_switch,
5176 .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
5177 .test_ring = gfx_v10_0_ring_test_ring,
5178 .test_ib = gfx_v10_0_ring_test_ib,
5179 .insert_nop = amdgpu_ring_insert_nop,
5180 .pad_ib = amdgpu_ring_generic_pad_ib,
5181 .emit_wreg = gfx_v10_0_ring_emit_wreg,
5182 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5185 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
5186 .type = AMDGPU_RING_TYPE_KIQ,
5188 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
5189 .support_64bit_ptrs = true,
5190 .vmhub = AMDGPU_GFXHUB_0,
5191 .get_rptr = gfx_v10_0_ring_get_rptr_compute,
5192 .get_wptr = gfx_v10_0_ring_get_wptr_compute,
5193 .set_wptr = gfx_v10_0_ring_set_wptr_compute,
5195 20 + /* gfx_v10_0_ring_emit_gds_switch */
5196 7 + /* gfx_v10_0_ring_emit_hdp_flush */
5197 5 + /* hdp invalidate */
5198 7 + /* gfx_v10_0_ring_emit_pipeline_sync */
5199 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5200 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5201 2 + /* gfx_v10_0_ring_emit_vm_flush */
5202 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
5203 .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
5204 .emit_ib = gfx_v10_0_ring_emit_ib_compute,
5205 .emit_fence = gfx_v10_0_ring_emit_fence_kiq,
5206 .test_ring = gfx_v10_0_ring_test_ring,
5207 .test_ib = gfx_v10_0_ring_test_ib,
5208 .insert_nop = amdgpu_ring_insert_nop,
5209 .pad_ib = amdgpu_ring_generic_pad_ib,
5210 .emit_rreg = gfx_v10_0_ring_emit_rreg,
5211 .emit_wreg = gfx_v10_0_ring_emit_wreg,
5212 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5215 static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
5219 adev->gfx.kiq.ring.funcs = &gfx_v10_0_ring_funcs_kiq;
5221 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5222 adev->gfx.gfx_ring[i].funcs = &gfx_v10_0_ring_funcs_gfx;
5224 for (i = 0; i < adev->gfx.num_compute_rings; i++)
5225 adev->gfx.compute_ring[i].funcs = &gfx_v10_0_ring_funcs_compute;
5228 static const struct amdgpu_irq_src_funcs gfx_v10_0_eop_irq_funcs = {
5229 .set = gfx_v10_0_set_eop_interrupt_state,
5230 .process = gfx_v10_0_eop_irq,
5233 static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_reg_irq_funcs = {
5234 .set = gfx_v10_0_set_priv_reg_fault_state,
5235 .process = gfx_v10_0_priv_reg_irq,
5238 static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_inst_irq_funcs = {
5239 .set = gfx_v10_0_set_priv_inst_fault_state,
5240 .process = gfx_v10_0_priv_inst_irq,
5243 static const struct amdgpu_irq_src_funcs gfx_v10_0_kiq_irq_funcs = {
5244 .set = gfx_v10_0_kiq_set_interrupt_state,
5245 .process = gfx_v10_0_kiq_irq,
5248 static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)
5250 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5251 adev->gfx.eop_irq.funcs = &gfx_v10_0_eop_irq_funcs;
5253 adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
5254 adev->gfx.kiq.irq.funcs = &gfx_v10_0_kiq_irq_funcs;
5256 adev->gfx.priv_reg_irq.num_types = 1;
5257 adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs;
5259 adev->gfx.priv_inst_irq.num_types = 1;
5260 adev->gfx.priv_inst_irq.funcs = &gfx_v10_0_priv_inst_irq_funcs;
5263 static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
5265 switch (adev->asic_type) {
5269 adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
5276 static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
5278 /* init asic gds info */
5279 switch (adev->asic_type) {
5282 adev->gds.gds_size = 0x10000;
5283 adev->gds.gds_compute_max_wave_id = 0x4ff;
5284 adev->gds.vgt_gs_max_wave_id = 0x3ff;
5288 adev->gds.gws_size = 64;
5289 adev->gds.oa_size = 16;
5292 static void gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
5300 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
5301 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
5303 WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
5306 static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
5308 u32 data, wgp_bitmask;
5309 data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
5310 data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
5312 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
5313 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
5316 amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
5318 return (~data) & wgp_bitmask;
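/*
 * The fused-off (CC_GC_*) and user-disabled (GC_USER_*) INACTIVE_WGPS
 * fields are OR'ed, inverted, and clipped to the per-SH WGP count; with
 * two CUs per WGP that count is max_cu_per_sh / 2.
 */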
5321 static u32 gfx_v10_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
5323 u32 wgp_idx, wgp_active_bitmap;
5324 u32 cu_bitmap_per_wgp, cu_active_bitmap;
5326 wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
5327 cu_active_bitmap = 0;
5329 for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
5330 /* each enabled WGP implies its 2 CUs are enabled */
5331 cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
5332 if (wgp_active_bitmap & (1 << wgp_idx))
5333 cu_active_bitmap |= cu_bitmap_per_wgp;
5336 return cu_active_bitmap;
5339 static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
5340 struct amdgpu_cu_info *cu_info)
5342 int i, j, k, counter, active_cu_number = 0;
5343 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5344 unsigned disable_masks[4 * 2];
5346 if (!adev || !cu_info)
5349 amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
5351 mutex_lock(&adev->grbm_idx_mutex);
5352 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5353 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5357 gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
5359 gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(
5360 adev, disable_masks[i * 2 + j]);
5361 bitmap = gfx_v10_0_get_cu_active_bitmap_per_sh(adev);
5362 cu_info->bitmap[i][j] = bitmap;
5364 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
5365 if (bitmap & mask) {
5366 if (counter < adev->gfx.config.max_cu_per_sh)
5372 active_cu_number += counter;
5374 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
5375 cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
5378 gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5379 mutex_unlock(&adev->grbm_idx_mutex);
5381 cu_info->number = active_cu_number;
5382 cu_info->ao_cu_mask = ao_cu_mask;
5383 cu_info->simd_per_cu = NUM_SIMD_PER_CU;
5388 const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
5390 .type = AMD_IP_BLOCK_TYPE_GFX,
5394 .funcs = &gfx_v10_0_ip_funcs,