/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"
#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L

#define mmGCEA_PROBE_MAP                        0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX               0
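/*
 * Note: the PWR_MISC_CNTL_STATUS and GCEA_PROBE_MAP offsets/masks above are
 * defined locally because they are not exported by the gc_9_0 register
 * headers included here; per the masks, PWR_GFXOFF_STATUS is a two-bit
 * field starting at bit 1.
 */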
66 MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
67 MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
68 MODULE_FIRMWARE("amdgpu/vega10_me.bin");
69 MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
70 MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
71 MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");
73 MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
74 MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
75 MODULE_FIRMWARE("amdgpu/vega12_me.bin");
76 MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
77 MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
78 MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");
80 MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
81 MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
82 MODULE_FIRMWARE("amdgpu/vega20_me.bin");
83 MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
84 MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
85 MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");
87 MODULE_FIRMWARE("amdgpu/raven_ce.bin");
88 MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
89 MODULE_FIRMWARE("amdgpu/raven_me.bin");
90 MODULE_FIRMWARE("amdgpu/raven_mec.bin");
91 MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
92 MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
94 MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
95 MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
96 MODULE_FIRMWARE("amdgpu/picasso_me.bin");
97 MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
98 MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
99 MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
100 MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");
102 MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
103 MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
104 MODULE_FIRMWARE("amdgpu/raven2_me.bin");
105 MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
106 MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
107 MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
108 MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
110 MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
111 MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin");
112 MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");
114 MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
115 MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
116 MODULE_FIRMWARE("amdgpu/renoir_me.bin");
117 MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
118 MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
119 MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
#define mmTCP_CHAN_STEER_0_ARCT				0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX		0
#define mmTCP_CHAN_STEER_1_ARCT				0x0b04
#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX		0
#define mmTCP_CHAN_STEER_2_ARCT				0x0b09
#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX		0
#define mmTCP_CHAN_STEER_3_ARCT				0x0b0a
#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX		0
#define mmTCP_CHAN_STEER_4_ARCT				0x0b0b
#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX		0
#define mmTCP_CHAN_STEER_5_ARCT				0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX		0
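/*
 * GFX sub-block enumeration shared with the RAS TA: each hardware block
 * (CPC, CPF, CPG, GDS, SPI, SQ, SQC, TA, TCA, TCC, TCP, TD, EA, UTC) gets a
 * contiguous range of indices delimited by *_INDEX_START/*_INDEX_END
 * markers, so individual sub-blocks can be addressed for error injection
 * and error counting.
 */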
enum ta_ras_gfx_subblock {
	/* CPC */
	TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
	TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
	TA_RAS_BLOCK__GFX_CPC_UCODE,
	TA_RAS_BLOCK__GFX_DC_STATE_ME1,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
	TA_RAS_BLOCK__GFX_DC_STATE_ME2,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	/* CPF */
	TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
	TA_RAS_BLOCK__GFX_CPF_TAG,
	TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
	/* CPG */
	TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
	TA_RAS_BLOCK__GFX_CPG_TAG,
	TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
	/* GDS */
	TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	/* SPI */
	TA_RAS_BLOCK__GFX_SPI_SR_MEM,
	/* SQ */
	TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_LDS_D,
	TA_RAS_BLOCK__GFX_SQ_LDS_I,
	TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP*/
	TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
	/* SQC (3 sub-ranges) */
	TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	/* SQC range 0 */
	TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
		TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	/* SQC range 1 */
	TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	/* SQC range 2 */
	TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
	/* TA */
	TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
	TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	/* TCA */
	TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	/* TCC (5 sub-ranges)*/
	TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	/* TCC range 0 */
	TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
	TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	/* TCC range 1 */
	TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
		TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	/* TCC range 2 */
	TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
	TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
	TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
		TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	/* TCC range 3 */
	TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
		TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	/* TCC range 4 */
	TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
		TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
		TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
	/* TCI */
	TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
	/* TCP */
	TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
	TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
	TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
	TA_RAS_BLOCK__GFX_TCP_DB_RAM,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	/* TD */
	TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
	TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	/* EA (3 sub-ranges)*/
	TA_RAS_BLOCK__GFX_EA_INDEX_START,
	/* EA range 0 */
	TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	/* EA range 1 */
	TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	/* EA range 2 */
	TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
	/* UTC VM L2 bank cache */
	TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
	/* UTC VM walker */
	TA_RAS_BLOCK__UTC_VML2_WALKER,
	/* UTC ATC L2 2MB cache*/
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
	/* UTC ATC L2 4KB cache*/
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
	TA_RAS_BLOCK__GFX_MAX
};
struct ras_gfx_subblock {
	unsigned char *name;
	int ta_subblock;
	int hw_supported_error_type;
	int sw_supported_error_type;
};

#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)                 \
	[AMDGPU_RAS_BLOCK__##subblock] = {                                     \
		#subblock,                                                     \
		TA_RAS_BLOCK__##subblock,                                      \
		((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)),                  \
		(((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),                  \
	}
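/*
 * Arguments a..d pack the hardware-supported error types into
 * hw_supported_error_type (bits 0..3), and e..h pack the software-handled
 * error types into sw_supported_error_type (note the deliberately
 * non-sequential bit order in the macro). The per-bit meaning follows the
 * RAS TA interface definitions, not this file, so treat this summary as a
 * reading aid rather than documentation of the ABI.
 */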
static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
			     0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
			     0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
};
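/*
 * The golden register tables below are applied through
 * soc15_program_register_sequence(): for each
 * SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_value) entry, the
 * register is read, the bits selected by and_mask are cleared, or_value
 * (limited to and_mask bits) is ORed in, and the result is written back.
 */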
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};
static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};
static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};
static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
};
static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};
static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};
static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
};
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041
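/*
 * Per-ASIC golden GB_ADDR_CONFIG values; presumably used when seeding or
 * validating adev->gfx.config.gb_addr_config during gfx config init (an
 * assumption based on the _GOLDEN naming and the tables above).
 */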
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
					  void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
				     void *inject_if);
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg10,
						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_VEGA12:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1,
						ARRAY_SIZE(golden_settings_gc_9_2_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1_vg12,
						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
		break;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg20,
						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
		break;
	case CHIP_ARCTURUS:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_4_1_arct,
						ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
						ARRAY_SIZE(golden_settings_gc_9_1));
		if (adev->rev_id >= 8)
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv2,
							ARRAY_SIZE(golden_settings_gc_9_1_rv2));
		else
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv1,
							ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	case CHIP_RENOIR:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_1_rn,
						ARRAY_SIZE(golden_settings_gc_9_1_rn));
		return; /* Renoir does not need the common golden settings */
	default:
		break;
	}

	if (adev->asic_type != CHIP_ARCTURUS)
		soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
						(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
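/*
 * Emit a PACKET3_WRITE_DATA to a register: header, control word (engine
 * select, destination select 0 = register, optional write confirm), the
 * destination register offset, an upper address dword (0 for register
 * writes), and the value itself.
 */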
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
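/*
 * Emit a PACKET3_WAIT_REG_MEM: the CP polls a register (mem_space = 0) or a
 * memory location (mem_space = 1) until (value & mask) == ref (function 3 =
 * equal); the final dword is the poll interval.
 */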
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |	 /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}
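/*
 * Basic ring smoke test: seed a scratch register with 0xCAFEDEAD, ask the
 * CP to write 0xDEADBEEF to it through the ring, then poll until the new
 * value lands or adev->usec_timeout expires.
 */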
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r)
		return r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		goto error_free_scratch;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_scratch:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
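/*
 * IB smoke test: same idea as the ring test, but the WRITE_DATA packet is
 * submitted through an indirect buffer, completion is awaited on the
 * returned fence, and the result is read back from a writeback slot.
 */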
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 16, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}
static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}
static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
			le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}
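/*
 * me/mec_fw_write_wait record whether the CP firmware is recent enough to
 * handle the combined write-then-wait operation used elsewhere in this
 * file, so callers can emit a single packet instead of a separate register
 * write followed by a WAIT_REG_MEM.
 */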
static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
{
	adev->gfx.me_fw_write_wait = false;
	adev->gfx.mec_fw_write_wait = false;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 42) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
		    (adev->gfx.pfp_feature_version >= 42))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000193) &&
		    (adev->gfx.mec_feature_version >= 42))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_VEGA12:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 44) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
		    (adev->gfx.pfp_feature_version >= 44))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000196) &&
		    (adev->gfx.mec_feature_version >= 44))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_VEGA20:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 44) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
		    (adev->gfx.pfp_feature_version >= 44))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000197) &&
		    (adev->gfx.mec_feature_version >= 44))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_RAVEN:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 42) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
		    (adev->gfx.pfp_feature_version >= 42))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000192) &&
		    (adev->gfx.mec_feature_version >= 42))
			adev->gfx.mec_fw_write_wait = true;
		break;
	default:
		break;
	}
}
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		break;
	case CHIP_RAVEN:
		if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
		    ((adev->gfx.rlc_fw_version != 106 &&
		      adev->gfx.rlc_fw_version < 531) ||
		     (adev->gfx.rlc_fw_version == 53815) ||
		     (adev->gfx.rlc_feature_version < 1) ||
		     !adev->gfx.rlc.is_rlc_v2_1))
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_RLC_SMU_HS;
		break;
	default:
		break;
	}
}
static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
					  const char *chip_name)
{
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx9: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
	}
	return err;
}
static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
				       const char *chip_name)
{
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;
	uint16_t version_major;
	uint16_t version_minor;
	uint32_t smu_version;

	/*
	 * For Picasso on an AM4 socket board, use picasso_rlc_am4.bin
	 * instead of picasso_rlc.bin.
	 *
	 * PCO AM4: revision >= 0xC8 && revision <= 0xCF
	 *	    or revision >= 0xD8 && revision <= 0xDF
	 * otherwise it is PCO FP5
	 */
	if (!strcmp(chip_name, "picasso") &&
	    (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
	     ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
	else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
		 (smu_version >= 0x41e2b))
		/*
		 * SMC is loaded by the SBIOS on APUs, so the SMU version can
		 * be queried directly.
		 */
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2 && version_minor == 1)
		adev->gfx.rlc.is_rlc_v2_1 = true;

	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v9_0_init_rlc_ext_microcode(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		if (adev->gfx.rlc.is_rlc_v2_1 &&
		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx9: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
	}
	return err;
}
static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
					      const char *chip_name)
{
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

			/* TODO: Determine if MEC2 JT FW loading can be removed
			 * for all GFX V9 ASICs and above.
			 */
			if (adev->asic_type != CHIP_ARCTURUS &&
			    adev->asic_type != CHIP_RENOIR) {
				info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
				info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
				info->fw = adev->gfx.mec2_fw;
				adev->firmware.fw_size +=
					ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
					      PAGE_SIZE);
			}
		}
	}

out:
	gfx_v9_0_check_if_need_gfxoff(adev);
	gfx_v9_0_check_fw_write_wait(adev);
	if (err) {
		dev_err(adev->dev,
			"gfx9: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}
	return err;
}
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	int r;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			chip_name = "raven2";
		else if (adev->pdev->device == 0x15d8)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_RENOIR:
		chip_name = "renoir";
		break;
	default:
		BUG();
	}

	/* No CPG in Arcturus */
	if (adev->asic_type != CHIP_ARCTURUS) {
		r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
		if (r)
			return r;
	}

	r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
	if (r)
		return r;

	r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
	if (r)
		return r;

	return 0;
}
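/*
 * Clear-state buffer (CSB) size accounting, mirrored by
 * gfx_v9_0_get_csb_buffer() below: 2 dwords for the preamble begin, 3 for
 * context control, (2 + reg_count) per SET_CONTEXT_REG extent, 2 for the
 * preamble end, and 2 for the CLEAR_STATE packet.
 */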
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}
1435 static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
1436 volatile u32 *buffer)
1439 const struct cs_section_def *sect = NULL;
1440 const struct cs_extent_def *ext = NULL;
1442 if (adev->gfx.rlc.cs_data == NULL)
1447 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1448 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1450 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
1451 buffer[count++] = cpu_to_le32(0x80000000);
1452 buffer[count++] = cpu_to_le32(0x80000000);
1454 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
1455 for (ext = sect->section; ext->extent != NULL; ++ext) {
1456 if (sect->id == SECT_CONTEXT) {
1458 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
1459 buffer[count++] = cpu_to_le32(ext->reg_index -
1460 PACKET3_SET_CONTEXT_REG_START);
1461 for (i = 0; i < ext->reg_count; i++)
1462 buffer[count++] = cpu_to_le32(ext->extent[i]);
1469 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1470 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
1472 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
1473 buffer[count++] = cpu_to_le32(0);
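/*
 * The dword layout written above mirrors gfx_v9_0_get_csb_size():
 * 2 dwords for PREAMBLE_CNTL(begin) + 3 for CONTEXT_CONTROL +
 * (2 + reg_count) per SECT_CONTEXT extent + 2 for PREAMBLE_CNTL(end)
 * + 2 for CLEAR_STATE. Illustrative check:
 *
 *   count == 2 + 3 + sum(2 + ext->reg_count) + 2 + 2
 */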
1476 static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
1478 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
1479 uint32_t pg_always_on_cu_num = 2;
1480 uint32_t always_on_cu_num;
1482 uint32_t mask, cu_bitmap, counter;
1484 if (adev->flags & AMD_IS_APU)
1485 always_on_cu_num = 4;
1486 else if (adev->asic_type == CHIP_VEGA12)
1487 always_on_cu_num = 8;
1489 always_on_cu_num = 12;
1491 mutex_lock(&adev->grbm_idx_mutex);
1492 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1493 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1497 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1499 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
1500 if (cu_info->bitmap[i][j] & mask) {
1501 if (counter == pg_always_on_cu_num)
1502 WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
1503 if (counter < always_on_cu_num)
1512 WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
1513 cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
1516 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1517 mutex_unlock(&adev->grbm_idx_mutex);
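/*
 * Sketch of the per-SH walk above, assuming the usual mask/counter
 * idiom (mask starts at 1 and shifts left once per CU):
 *
 *   if (cu_info->bitmap[i][j] & mask) {
 *           if (counter < always_on_cu_num)
 *                   cu_bitmap |= mask;
 *           counter++;
 *   }
 *   mask <<= 1;
 *
 * so the first always_on_cu_num enabled CUs of each SH end up in
 * cu_bitmap, and the first pg_always_on_cu_num of those are also
 * latched into RLC_PG_ALWAYS_ON_CU_MASK.
 */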
1520 static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
1524 /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1525 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1526 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
1527 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1528 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));
1530 /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1531 WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1533 /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1534 WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);
1536 mutex_lock(&adev->grbm_idx_mutex);
1537 /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1538 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1539 WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1541 /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1542 data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1543 data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1544 data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1545 WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1547 /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1548 data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1551 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1554 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
1555 * programmed in gfx_v9_0_init_always_on_cu_mask()
1558 /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
1559  * but is used here as part of the RLC_LB_CNTL configuration */
1560 data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1561 data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1562 data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1563 WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1564 mutex_unlock(&adev->grbm_idx_mutex);
1566 gfx_v9_0_init_always_on_cu_mask(adev);
1569 static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1573 /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1574 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1575 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
1576 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1577 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
1579 /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1580 WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1582 /* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
1583 WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1585 mutex_lock(&adev->grbm_idx_mutex);
1586 /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1587 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1588 WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1590 /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1591 data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1592 data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1593 data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1594 WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1596 /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1597 data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1600 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1603 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
1604 * programmed in gfx_v9_0_init_always_on_cu_mask()
1607 /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
1608  * but is used here as part of the RLC_LB_CNTL configuration */
1609 data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1610 data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1611 data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1612 WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1613 mutex_unlock(&adev->grbm_idx_mutex);
1615 gfx_v9_0_init_always_on_cu_mask(adev);
1618 static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
1620 WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
1623 static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
1628 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1630 const struct cs_section_def *cs_data;
1633 adev->gfx.rlc.cs_data = gfx9_cs_data;
1635 cs_data = adev->gfx.rlc.cs_data;
1638 /* init clear state block */
1639 r = amdgpu_gfx_rlc_init_csb(adev);
1644 if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
1645 /* TODO: double check the cp_table_size for RV */
1646 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1647 r = amdgpu_gfx_rlc_init_cpt(adev);
1652 switch (adev->asic_type) {
1654 gfx_v9_0_init_lbpw(adev);
1657 gfx_v9_4_init_lbpw(adev);
1666 static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
1670 r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
1671 if (unlikely(r != 0))
1674 r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
1675 AMDGPU_GEM_DOMAIN_VRAM);
1677 adev->gfx.rlc.clear_state_gpu_addr =
1678 amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
1680 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
1685 static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
1689 if (!adev->gfx.rlc.clear_state_obj)
1692 r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
1693 if (likely(r == 0)) {
1694 amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
1695 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
1699 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
1701 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1702 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1705 static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
1709 const __le32 *fw_data;
1712 size_t mec_hpd_size;
1714 const struct gfx_firmware_header_v1_0 *mec_hdr;
1716 bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1718 /* take ownership of the relevant compute queues */
1719 amdgpu_gfx_compute_queue_acquire(adev);
1720 mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
1722 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1723 AMDGPU_GEM_DOMAIN_VRAM,
1724 &adev->gfx.mec.hpd_eop_obj,
1725 &adev->gfx.mec.hpd_eop_gpu_addr,
1728 dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
1729 gfx_v9_0_mec_fini(adev);
1733 memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);
1735 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1736 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1738 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1740 fw_data = (const __le32 *)
1741 (adev->gfx.mec_fw->data +
1742 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1743 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
1745 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
1746 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1747 &adev->gfx.mec.mec_fw_obj,
1748 &adev->gfx.mec.mec_fw_gpu_addr,
1751 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
1752 gfx_v9_0_mec_fini(adev);
1756 memcpy(fw, fw_data, fw_size);
1758 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1759 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
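/*
 * Sizing example for the two BOs above (illustrative): with 8 compute
 * rings the EOP BO is 8 * GFX9_MEC_HPD_SIZE = 8 * 4096 bytes = 32 KiB,
 * and each ring later takes its own 4 KiB slice in
 * gfx_v9_0_compute_ring_init(). The MEC fw BO simply holds a copy of
 * the ucode text in GTT for the CP to fetch.
 */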
1764 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
1766 WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
1767 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1768 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1769 (address << SQ_IND_INDEX__INDEX__SHIFT) |
1770 (SQ_IND_INDEX__FORCE_READ_MASK));
1771 return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1774 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
1775 uint32_t wave, uint32_t thread,
1776 uint32_t regno, uint32_t num, uint32_t *out)
1778 WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
1779 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1780 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1781 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
1782 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
1783 (SQ_IND_INDEX__FORCE_READ_MASK) |
1784 (SQ_IND_INDEX__AUTO_INCR_MASK));
1786 *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
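/*
 * Because AUTO_INCR is set, SQ_IND_INDEX is programmed once and each
 * subsequent SQ_IND_DATA read returns the next register, letting the
 * caller stream 'num' consecutive dwords. Typical use, as in the SGPR
 * reader below (illustrative):
 *
 *   wave_read_regs(adev, simd, wave, 0,
 *                  start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
 */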
1789 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
1791 /* type 1 wave data */
1792 dst[(*no_fields)++] = 1;
1793 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
1794 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
1795 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
1796 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
1797 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
1798 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
1799 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
1800 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
1801 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
1802 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
1803 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
1804 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
1805 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
1806 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
1809 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
1810 uint32_t wave, uint32_t start,
1811 uint32_t size, uint32_t *dst)
1814 adev, simd, wave, 0,
1815 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
1818 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
1819 uint32_t wave, uint32_t thread,
1820 uint32_t start, uint32_t size,
1824 adev, simd, wave, thread,
1825 start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
1828 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
1829 u32 me, u32 pipe, u32 q, u32 vm)
1831 soc15_grbm_select(adev, me, pipe, q, vm);
1834 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
1835 .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
1836 .select_se_sh = &gfx_v9_0_select_se_sh,
1837 .read_wave_data = &gfx_v9_0_read_wave_data,
1838 .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
1839 .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
1840 .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
1841 .ras_error_inject = &gfx_v9_0_ras_error_inject,
1842 .query_ras_error_count = &gfx_v9_0_query_ras_error_count
1845 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
1850 adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
1852 switch (adev->asic_type) {
1854 adev->gfx.config.max_hw_contexts = 8;
1855 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1856 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1857 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1858 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1859 gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
1862 adev->gfx.config.max_hw_contexts = 8;
1863 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1864 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1865 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1866 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1867 gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
1868 DRM_INFO("fix gfx.config for vega12\n");
1871 adev->gfx.config.max_hw_contexts = 8;
1872 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1873 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1874 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1875 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1876 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
1877 gb_addr_config &= ~0xf3e777ff;
1878 gb_addr_config |= 0x22014042;
1879 /* check vbios table if gpu info is not available */
1880 err = amdgpu_atomfirmware_get_gfx_info(adev);
1885 adev->gfx.config.max_hw_contexts = 8;
1886 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1887 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1888 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1889 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1890 if (adev->rev_id >= 8)
1891 gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
1893 gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
1896 adev->gfx.config.max_hw_contexts = 8;
1897 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1898 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1899 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1900 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1901 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
1902 gb_addr_config &= ~0xf3e777ff;
1903 gb_addr_config |= 0x22014042;
1906 adev->gfx.config.max_hw_contexts = 8;
1907 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1908 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1909 adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
1910 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1911 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
1912 gb_addr_config &= ~0xf3e777ff;
1913 gb_addr_config |= 0x22010042;
1920 adev->gfx.config.gb_addr_config = gb_addr_config;
1922 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
1923 		REG_GET_FIELD(
1924 				adev->gfx.config.gb_addr_config,
1925 				GB_ADDR_CONFIG,
1926 				NUM_PIPES);
1928 adev->gfx.config.max_tile_pipes =
1929 	adev->gfx.config.gb_addr_config_fields.num_pipes;
1931 adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
1932 		REG_GET_FIELD(
1933 				adev->gfx.config.gb_addr_config,
1934 				GB_ADDR_CONFIG,
1935 				NUM_BANKS);
1936 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
1937 		REG_GET_FIELD(
1938 				adev->gfx.config.gb_addr_config,
1939 				GB_ADDR_CONFIG,
1940 				MAX_COMPRESSED_FRAGS);
1941 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
1942 		REG_GET_FIELD(
1943 				adev->gfx.config.gb_addr_config,
1944 				GB_ADDR_CONFIG,
1945 				NUM_RB_PER_SE);
1946 adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
1947 		REG_GET_FIELD(
1948 				adev->gfx.config.gb_addr_config,
1949 				GB_ADDR_CONFIG,
1950 				NUM_SHADER_ENGINES);
1951 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
1952 		REG_GET_FIELD(
1953 				adev->gfx.config.gb_addr_config,
1954 				GB_ADDR_CONFIG,
1955 				PIPE_INTERLEAVE_SIZE));
1960 static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
1961 struct amdgpu_ngg_buf *ngg_buf,
1963 int default_size_se)
1968 dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
1971 size_se = size_se ? size_se : default_size_se;
1973 ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
1974 r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
1975 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1980 dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
1983 ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);
1988 static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
1992 for (i = 0; i < NGG_BUF_MAX; i++)
1993 amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
1994 &adev->gfx.ngg.buf[i].gpu_addr,
1997 memset(&adev->gfx.ngg.buf[0], 0,
1998 sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);
2000 adev->gfx.ngg.init = false;
2005 static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
2009 if (!amdgpu_ngg || adev->gfx.ngg.init)
2012 /* GDS reserve memory: 64 bytes alignment */
2013 adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
2014 adev->gds.gds_size -= adev->gfx.ngg.gds_reserve_size;
2015 adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
2016 adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
2018 /* Primitive Buffer */
2019 r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
2020 amdgpu_prim_buf_per_se,
2023 dev_err(adev->dev, "Failed to create Primitive Buffer\n");
2027 /* Position Buffer */
2028 r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
2029 amdgpu_pos_buf_per_se,
2032 dev_err(adev->dev, "Failed to create Position Buffer\n");
2036 /* Control Sideband */
2037 r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
2038 amdgpu_cntl_sb_buf_per_se,
2041 dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
2045 /* Parameter Cache, not created by default */
2046 if (amdgpu_param_buf_per_se <= 0)
2049 r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
2050 amdgpu_param_buf_per_se,
2053 dev_err(adev->dev, "Failed to create Parameter Cache\n");
2058 adev->gfx.ngg.init = true;
2061 gfx_v9_0_ngg_fini(adev);
2065 static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
2067 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2074 /* Program buffer size */
2075 data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
2076 adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
2077 data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
2078 adev->gfx.ngg.buf[NGG_POS].size >> 8);
2079 WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
2081 data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
2082 adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
2083 data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
2084 adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
2085 WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
2087 /* Program buffer base address */
2088 base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
2089 data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
2090 WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);
2092 base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
2093 data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
2094 WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);
2096 base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
2097 data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
2098 WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);
2100 base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
2101 data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
2102 WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);
2104 base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
2105 data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
2106 WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);
2108 base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
2109 data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
2110 WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);
2112 /* Clear GDS reserved memory */
2113 r = amdgpu_ring_alloc(ring, 17);
2115 DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n",
2120 gfx_v9_0_write_data_to_reg(ring, 0, false,
2121 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
2122 (adev->gds.gds_size +
2123 adev->gfx.ngg.gds_reserve_size));
2125 amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
2126 amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
2127 PACKET3_DMA_DATA_DST_SEL(1) |
2128 PACKET3_DMA_DATA_SRC_SEL(2)));
2129 amdgpu_ring_write(ring, 0);
2130 amdgpu_ring_write(ring, 0);
2131 amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
2132 amdgpu_ring_write(ring, 0);
2133 amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
2134 adev->gfx.ngg.gds_reserve_size);
2136 gfx_v9_0_write_data_to_reg(ring, 0, false,
2137 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
2139 amdgpu_ring_commit(ring);
2144 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
2145 int mec, int pipe, int queue)
2149 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2156 ring->queue = queue;
2158 ring->ring_obj = NULL;
2159 ring->use_doorbell = true;
2160 ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
2161 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
2162 + (ring_id * GFX9_MEC_HPD_SIZE);
2163 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
2165 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
2166 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
2169 /* type-2 packets are deprecated on MEC, use type-3 instead */
2170 r = amdgpu_ring_init(adev, ring, 1024,
2171 &adev->gfx.eop_irq, irq_type);
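/*
 * Worked example for the indexing above (illustrative): doorbells are
 * 64-bit entities, so the dword doorbell_index is
 * (mec_ring0 + ring_id) << 1; and since mec0 is me1, a queue on
 * me 1 / pipe 2 resolves to
 * irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
 *            + (1 - 1) * num_pipe_per_mec + 2.
 */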
2179 static int gfx_v9_0_sw_init(void *handle)
2181 int i, j, k, r, ring_id;
2182 struct amdgpu_ring *ring;
2183 struct amdgpu_kiq *kiq;
2184 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2186 switch (adev->asic_type) {
2193 adev->gfx.mec.num_mec = 2;
2196 adev->gfx.mec.num_mec = 1;
2200 adev->gfx.mec.num_pipe_per_mec = 4;
2201 adev->gfx.mec.num_queue_per_pipe = 8;
2204 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
2208 /* Privileged reg */
2209 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
2210 &adev->gfx.priv_reg_irq);
2214 /* Privileged inst */
2215 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
2216 &adev->gfx.priv_inst_irq);
2221 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
2222 &adev->gfx.cp_ecc_error_irq);
2227 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
2228 &adev->gfx.cp_ecc_error_irq);
2232 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2234 gfx_v9_0_scratch_init(adev);
2236 r = gfx_v9_0_init_microcode(adev);
2238 DRM_ERROR("Failed to load gfx firmware!\n");
2242 r = adev->gfx.rlc.funcs->init(adev);
2244 DRM_ERROR("Failed to init rlc BOs!\n");
2248 r = gfx_v9_0_mec_init(adev);
2250 DRM_ERROR("Failed to init MEC BOs!\n");
2254 /* set up the gfx ring */
2255 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2256 ring = &adev->gfx.gfx_ring[i];
2257 ring->ring_obj = NULL;
2258 if (i == 0)
2259 	sprintf(ring->name, "gfx");
2260 else
2261 	sprintf(ring->name, "gfx_%d", i);
2262 ring->use_doorbell = true;
2263 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2264 r = amdgpu_ring_init(adev, ring, 1024,
2265 &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
2270 /* set up the compute queues - allocate horizontally across pipes */
2272 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2273 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2274 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2275 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
2278 r = gfx_v9_0_compute_ring_init(adev,
2289 r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
2291 DRM_ERROR("Failed to init KIQ BOs!\n");
2295 kiq = &adev->gfx.kiq;
2296 r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
2300 /* create MQD for all compute queues as well as KIQ for SRIOV case */
2301 r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
2305 adev->gfx.ce_ram_size = 0x8000;
2307 r = gfx_v9_0_gpu_early_init(adev);
2311 r = gfx_v9_0_ngg_init(adev);
2319 static int gfx_v9_0_sw_fini(void *handle)
2322 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2324 if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
2326 struct ras_common_if *ras_if = adev->gfx.ras_if;
2327 struct ras_ih_if ih_info = {
2331 amdgpu_ras_debugfs_remove(adev, ras_if);
2332 amdgpu_ras_sysfs_remove(adev, ras_if);
2333 amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
2334 amdgpu_ras_feature_enable(adev, ras_if, 0);
2338 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2339 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2340 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2341 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2343 amdgpu_gfx_mqd_sw_fini(adev);
2344 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
2345 amdgpu_gfx_kiq_fini(adev);
2347 gfx_v9_0_mec_fini(adev);
2348 gfx_v9_0_ngg_fini(adev);
2349 amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
2350 if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
2351 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2352 &adev->gfx.rlc.cp_table_gpu_addr,
2353 (void **)&adev->gfx.rlc.cp_table_ptr);
2355 gfx_v9_0_free_microcode(adev);
2361 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
2366 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
2370 if (instance == 0xffffffff)
2371 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2373 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
2375 if (se_num == 0xffffffff)
2376 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2378 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2380 if (sh_num == 0xffffffff)
2381 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2383 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2385 WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
2388 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2392 data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
2393 data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
2395 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2396 data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2398 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
2399 adev->gfx.config.max_sh_per_se);
2401 return (~data) & mask;
2404 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
2409 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2410 adev->gfx.config.max_sh_per_se;
2412 mutex_lock(&adev->grbm_idx_mutex);
2413 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2414 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2415 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2416 data = gfx_v9_0_get_rb_active_bitmap(adev);
2417 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2418 rb_bitmap_width_per_sh);
2421 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2422 mutex_unlock(&adev->grbm_idx_mutex);
2424 adev->gfx.config.backend_enable_mask = active_rbs;
2425 adev->gfx.config.num_rbs = hweight32(active_rbs);
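/*
 * Packing example (illustrative): with 4 SEs, 1 SH per SE and 4 RBs
 * per SE, rb_bitmap_width_per_sh = 4 / 1 = 4, so SE0's RB bitmap
 * lands in bits [3:0] of active_rbs, SE1's in [7:4], and so on;
 * num_rbs is then simply the popcount of the packed mask.
 */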
2428 #define DEFAULT_SH_MEM_BASES (0x6000)
2429 #define FIRST_COMPUTE_VMID (8)
2430 #define LAST_COMPUTE_VMID (16)
2431 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
2434 uint32_t sh_mem_config;
2435 uint32_t sh_mem_bases;
2438 * Configure apertures:
2439 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
2440 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
2441 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (256TB)
2443 sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
2445 sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
2446 SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2447 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
2449 mutex_lock(&adev->srbm_mutex);
2450 for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
2451 soc15_grbm_select(adev, 0, 0, 0, i);
2452 /* CP and shaders */
2453 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
2454 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
2456 soc15_grbm_select(adev, 0, 0, 0, 0);
2457 mutex_unlock(&adev->srbm_mutex);
2459 /* Initialize all compute VMIDs to have no GDS, GWS, or OA
2460  * access. These should be enabled by FW for target VMIDs. */
2461 for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
2462 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
2463 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
2464 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
2465 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
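/*
 * Aperture arithmetic behind the comment above (illustrative): the
 * 16-bit base fields written to SH_MEM_BASES hold bits 63:48 of the
 * aperture address (see also the PRIVATE_BASE/SHARED_BASE programming
 * in gfx_v9_0_constants_init() below), so DEFAULT_SH_MEM_BASES gives
 * 0x6000ULL << 48 = 0x60000000'00000000, matching the LDS/scratch
 * ranges listed at the top of this function.
 */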
2469 static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
2474 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2475 * access. Compute VMIDs should be enabled by FW for target VMIDs,
2476 * the driver can enable them for graphics. VMID0 should maintain
2477 * access so that HWS firmware can save/restore entries.
2479 for (vmid = 1; vmid < 16; vmid++) {
2480 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
2481 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
2482 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
2483 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
2487 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
2492 WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2494 gfx_v9_0_tiling_mode_table_init(adev);
2496 gfx_v9_0_setup_rb(adev);
2497 gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
2498 adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
2500 /* XXX SH_MEM regs */
2501 /* where to put LDS, scratch, GPUVM in FSA64 space */
2502 mutex_lock(&adev->srbm_mutex);
2503 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
2504 soc15_grbm_select(adev, 0, 0, 0, i);
2505 /* CP and shaders */
2507 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2508 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2509 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2511 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2512 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
2514 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2515 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2516 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2518 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2519 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2520 (adev->gmc.private_aperture_start >> 48));
2521 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2522 (adev->gmc.shared_aperture_start >> 48));
2523 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
2526 soc15_grbm_select(adev, 0, 0, 0, 0);
2528 mutex_unlock(&adev->srbm_mutex);
2530 gfx_v9_0_init_compute_vmid(adev);
2531 gfx_v9_0_init_gds_vmid(adev);
2534 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2539 mutex_lock(&adev->grbm_idx_mutex);
2540 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2541 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2542 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2543 for (k = 0; k < adev->usec_timeout; k++) {
2544 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2548 if (k == adev->usec_timeout) {
2549 gfx_v9_0_select_se_sh(adev, 0xffffffff,
2550 0xffffffff, 0xffffffff);
2551 mutex_unlock(&adev->grbm_idx_mutex);
2552 DRM_INFO("Timeout waiting for RLC serdes %u,%u\n",
2558 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2559 mutex_unlock(&adev->grbm_idx_mutex);
2561 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2562 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2563 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2564 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2565 for (k = 0; k < adev->usec_timeout; k++) {
2566 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2572 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2575 u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2577 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2578 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2579 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2580 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2582 WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2585 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2588 WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2589 adev->gfx.rlc.clear_state_gpu_addr >> 32);
2590 WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2591 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2592 WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2593 adev->gfx.rlc.clear_state_size);
2596 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2597 int indirect_offset,
2599 int *unique_indirect_regs,
2600 int unique_indirect_reg_count,
2601 int *indirect_start_offsets,
2602 int *indirect_start_offsets_count,
2603 int max_start_offsets_count)
2607 for (; indirect_offset < list_size; indirect_offset++) {
2608 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2609 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2610 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2612 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2613 indirect_offset += 2;
2615 /* look for the matching index */
2616 for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2617 if (unique_indirect_regs[idx] ==
2618 register_list_format[indirect_offset] ||
2619 !unique_indirect_regs[idx])
2623 BUG_ON(idx >= unique_indirect_reg_count);
2625 if (!unique_indirect_regs[idx])
2626 unique_indirect_regs[idx] = register_list_format[indirect_offset];
2633 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2635 int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2636 int unique_indirect_reg_count = 0;
2638 int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2639 int indirect_start_offsets_count = 0;
2645 u32 *register_list_format =
2646 kmemdup(adev->gfx.rlc.register_list_format,
2647 adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2648 if (!register_list_format)
2651 /* setup unique_indirect_regs array and indirect_start_offsets array */
2652 unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2653 gfx_v9_1_parse_ind_reg_list(register_list_format,
2654 adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2655 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2656 unique_indirect_regs,
2657 unique_indirect_reg_count,
2658 indirect_start_offsets,
2659 &indirect_start_offsets_count,
2660 ARRAY_SIZE(indirect_start_offsets));
2662 /* enable auto inc in case it is disabled */
2663 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2664 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2665 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2667 /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2668 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2669 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2670 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2671 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2672 adev->gfx.rlc.register_restore[i]);
2674 /* load indirect register */
2675 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2676 adev->gfx.rlc.reg_list_format_start);
2678 /* direct register portion */
2679 for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2680 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2681 register_list_format[i]);
2683 /* indirect register portion */
2684 while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2685 if (register_list_format[i] == 0xFFFFFFFF) {
2686 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2690 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2691 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2693 for (j = 0; j < unique_indirect_reg_count; j++) {
2694 if (register_list_format[i] == unique_indirect_regs[j]) {
2695 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2700 BUG_ON(j >= unique_indirect_reg_count);
2705 /* set save/restore list size */
2706 list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2707 list_size = list_size >> 1;
2708 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2709 adev->gfx.rlc.reg_restore_list_size);
2710 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2712 /* write the starting offsets to RLC scratch ram */
2713 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2714 adev->gfx.rlc.starting_offsets_start);
2715 for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2716 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2717 indirect_start_offsets[i]);
2719 /* load unique indirect regs*/
2720 for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2721 if (unique_indirect_regs[i] != 0) {
2722 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2723 + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2724 unique_indirect_regs[i] & 0x3FFFF);
2726 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2727 + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2728 unique_indirect_regs[i] >> 20);
2732 kfree(register_list_format);
2736 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2738 WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2741 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2745 uint32_t default_data = 0;
2747 default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2748 if (enable) {
2749 /* enable GFXIP control over CGPG */
2750 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2751 if (default_data != data)
2752 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2755 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2756 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2757 if (default_data != data)
2758 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2760 /* restore GFXIP control over CGPG */
2761 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2762 if (default_data != data)
2763 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2767 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2771 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2772 AMD_PG_SUPPORT_GFX_SMG |
2773 AMD_PG_SUPPORT_GFX_DMG)) {
2774 /* init IDLE_POLL_COUNT = 60 */
2775 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2776 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2777 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2778 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2780 /* init RLC PG Delay */
2782 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2783 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2784 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2785 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2786 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2788 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2789 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2790 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2791 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2793 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2794 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2795 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2796 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2798 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2799 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2801 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2802 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2803 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2805 pwr_10_0_gfxip_control_over_cgpg(adev, true);
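/*
 * Assuming the usual 8-bit delay fields at shifts 0/8/16/24, the
 * RLC_PG_DELAY value programmed above works out to (illustrative):
 *
 *   (0x10 << 0) | (0x10 << 8) | (0x10 << 16) | (0x40 << 24)
 *   == 0x40101010
 */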
2809 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2813 uint32_t default_data = 0;
2815 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2816 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2817 SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2819 if (default_data != data)
2820 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2823 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2827 uint32_t default_data = 0;
2829 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2830 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2831 SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2833 if (default_data != data)
2834 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2837 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2841 uint32_t default_data = 0;
2843 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2844 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2847 if (default_data != data)
2848 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2851 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2854 uint32_t data, default_data;
2856 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2857 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2858 GFX_POWER_GATING_ENABLE,
2860 if (default_data != data)
2861 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2864 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2867 uint32_t data, default_data;
2869 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2870 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2871 GFX_PIPELINE_PG_ENABLE,
2873 if (default_data != data)
2874 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2877 /* read any GFX register to wake up GFX */
2878 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2881 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2884 uint32_t data, default_data;
2886 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2887 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2888 STATIC_PER_CU_PG_ENABLE,
2890 if (default_data != data)
2891 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2894 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2897 uint32_t data, default_data;
2899 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2900 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2901 DYN_PER_CU_PG_ENABLE,
2903 if (default_data != data)
2904 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2907 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2909 gfx_v9_0_init_csb(adev);
2912 * The RLC save/restore list is only supported since RLC v2_1,
2913 * and it is needed by the gfxoff feature.
2915 if (adev->gfx.rlc.is_rlc_v2_1) {
2916 gfx_v9_1_init_rlc_save_restore_list(adev);
2917 gfx_v9_0_enable_save_restore_machine(adev);
2920 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2921 AMD_PG_SUPPORT_GFX_SMG |
2922 AMD_PG_SUPPORT_GFX_DMG |
2924 AMD_PG_SUPPORT_GDS |
2925 AMD_PG_SUPPORT_RLC_SMU_HS)) {
2926 WREG32(mmRLC_JUMP_TABLE_RESTORE,
2927 adev->gfx.rlc.cp_table_gpu_addr >> 8);
2928 gfx_v9_0_init_gfx_power_gating(adev);
2932 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2934 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
2935 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2936 gfx_v9_0_wait_for_rlc_serdes(adev);
2939 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2941 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2943 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2947 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2949 #ifdef AMDGPU_RLC_DEBUG_RETRY
2953 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2956 /* on APUs (e.g. Carrizo) the CP interrupt is enabled only after the CP is initialized */
2957 if (!(adev->flags & AMD_IS_APU)) {
2958 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2962 #ifdef AMDGPU_RLC_DEBUG_RETRY
2963 /* RLC_GPM_GENERAL_6 : RLC Ucode version */
2964 rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
2965 if (rlc_ucode_ver == 0x108) {
2966 DRM_INFO("Using RLC debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
2967 rlc_ucode_ver, adev->gfx.rlc_fw_version);
2968 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
2969 * default is 0x9C4 to create a 100us interval */
2970 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
2971 /* RLC_GPM_TIMER_INT_3 and RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
2972  * to disable the page fault retry interrupts, default is
2973  * 0x100 */
2974 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
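/* Sanity check on the timer interval above (illustrative): 0x9C4 is
 * 2500 decimal, and 2500 cycles at an assumed 25 MHz RefCLK is
 * exactly the 100 us mentioned in the comment. */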
2979 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
2981 const struct rlc_firmware_header_v2_0 *hdr;
2982 const __le32 *fw_data;
2983 unsigned i, fw_size;
2985 if (!adev->gfx.rlc_fw)
2988 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2989 amdgpu_ucode_print_rlc_hdr(&hdr->header);
2991 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2992 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2993 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2995 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
2996 RLCG_UCODE_LOADING_START_ADDRESS);
2997 for (i = 0; i < fw_size; i++)
2998 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2999 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3004 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
3008 if (amdgpu_sriov_vf(adev)) {
3009 gfx_v9_0_init_csb(adev);
3013 adev->gfx.rlc.funcs->stop(adev);
3016 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
3018 gfx_v9_0_init_pg(adev);
3020 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3021 /* legacy rlc firmware loading */
3022 r = gfx_v9_0_rlc_load_microcode(adev);
3027 switch (adev->asic_type) {
3029 if (amdgpu_lbpw == 0)
3030 	gfx_v9_0_enable_lbpw(adev, false);
3031 else
3032 	gfx_v9_0_enable_lbpw(adev, true);
3035 if (amdgpu_lbpw > 0)
3036 	gfx_v9_0_enable_lbpw(adev, true);
3037 else
3038 	gfx_v9_0_enable_lbpw(adev, false);
3044 adev->gfx.rlc.funcs->start(adev);
3049 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3052 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
3054 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3055 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3056 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
3058 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3059 adev->gfx.gfx_ring[i].sched.ready = false;
3061 WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
3065 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3067 const struct gfx_firmware_header_v1_0 *pfp_hdr;
3068 const struct gfx_firmware_header_v1_0 *ce_hdr;
3069 const struct gfx_firmware_header_v1_0 *me_hdr;
3070 const __le32 *fw_data;
3071 unsigned i, fw_size;
3073 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
3076 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3077 adev->gfx.pfp_fw->data;
3078 ce_hdr = (const struct gfx_firmware_header_v1_0 *)
3079 adev->gfx.ce_fw->data;
3080 me_hdr = (const struct gfx_firmware_header_v1_0 *)
3081 adev->gfx.me_fw->data;
3083 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3084 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
3085 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3087 gfx_v9_0_cp_gfx_enable(adev, false);
3090 fw_data = (const __le32 *)
3091 (adev->gfx.pfp_fw->data +
3092 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3093 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3094 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
3095 for (i = 0; i < fw_size; i++)
3096 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3097 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3100 fw_data = (const __le32 *)
3101 (adev->gfx.ce_fw->data +
3102 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3103 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3104 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
3105 for (i = 0; i < fw_size; i++)
3106 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3107 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
3110 fw_data = (const __le32 *)
3111 (adev->gfx.me_fw->data +
3112 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3113 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3114 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
3115 for (i = 0; i < fw_size; i++)
3116 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3117 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
3122 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
3124 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
3125 const struct cs_section_def *sect = NULL;
3126 const struct cs_extent_def *ext = NULL;
3130 WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
3131 WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
3133 gfx_v9_0_cp_gfx_enable(adev, true);
3135 r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
3137 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3141 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3142 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3144 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3145 amdgpu_ring_write(ring, 0x80000000);
3146 amdgpu_ring_write(ring, 0x80000000);
3148 for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
3149 for (ext = sect->section; ext->extent != NULL; ++ext) {
3150 if (sect->id == SECT_CONTEXT) {
3151 amdgpu_ring_write(ring,
3152 PACKET3(PACKET3_SET_CONTEXT_REG,
3154 amdgpu_ring_write(ring,
3155 ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3156 for (i = 0; i < ext->reg_count; i++)
3157 amdgpu_ring_write(ring, ext->extent[i]);
3162 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3163 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3165 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3166 amdgpu_ring_write(ring, 0);
3168 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3169 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3170 amdgpu_ring_write(ring, 0x8000);
3171 amdgpu_ring_write(ring, 0x8000);
3173 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3174 tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
3175 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
3176 amdgpu_ring_write(ring, tmp);
3177 amdgpu_ring_write(ring, 0);
3179 amdgpu_ring_commit(ring);
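/*
 * The ring_alloc() budget at the top of this function is exact
 * (illustrative breakdown): gfx_v9_0_get_csb_size() dwords for the
 * clear-state sequence, + 4 for SET_BASE (header + 3 payload dwords)
 * and + 3 for SET_UCONFIG_REG (header + 2 payload dwords).
 */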
3184 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
3186 struct amdgpu_ring *ring;
3189 u64 rb_addr, rptr_addr, wptr_gpu_addr;
3191 /* Set the write pointer delay */
3192 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
3194 /* set the RB to use vmid 0 */
3195 WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
3197 /* Set ring buffer size */
3198 ring = &adev->gfx.gfx_ring[0];
3199 rb_bufsz = order_base_2(ring->ring_size / 8);
3200 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3201 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3203 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3205 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3207 /* Initialize the ring buffer's write pointers */
3209 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3210 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3212 /* set the wb address whether it's enabled or not */
3213 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3214 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3215 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3217 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3218 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
3219 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
3222 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3224 rb_addr = ring->gpu_addr >> 8;
3225 WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
3226 WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3228 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3229 if (ring->use_doorbell) {
3230 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3231 DOORBELL_OFFSET, ring->doorbell_index);
3232 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3235 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
3237 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
3239 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3240 DOORBELL_RANGE_LOWER, ring->doorbell_index);
3241 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3243 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
3244 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3247 /* start the ring */
3248 gfx_v9_0_cp_gfx_start(adev);
3249 ring->sched.ready = true;
3254 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3259 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
3261 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
3262 (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3263 for (i = 0; i < adev->gfx.num_compute_rings; i++)
3264 adev->gfx.compute_ring[i].sched.ready = false;
3265 adev->gfx.kiq.ring.sched.ready = false;
3270 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3272 const struct gfx_firmware_header_v1_0 *mec_hdr;
3273 const __le32 *fw_data;
3277 if (!adev->gfx.mec_fw)
3280 gfx_v9_0_cp_compute_enable(adev, false);
3282 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3283 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3285 fw_data = (const __le32 *)
3286 (adev->gfx.mec_fw->data +
3287 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3289 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3290 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3291 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
3293 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
3294 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
3295 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
3296 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3299 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3300 mec_hdr->jt_offset);
3301 for (i = 0; i < mec_hdr->jt_size; i++)
3302 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
3303 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3305 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3306 adev->gfx.mec_fw_version);
3307 /* TODO: Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
3313 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
3316 struct amdgpu_device *adev = ring->adev;
3318 /* tell RLC which is KIQ queue */
3319 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
3320 tmp &= 0xffffff00;
3321 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3322 WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3323 tmp |= 0x80;
3324 WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3327 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
3329 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
3330 uint64_t queue_mask = 0;
3333 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
3334 if (!test_bit(i, adev->gfx.mec.queue_bitmap))
3337 /* This situation may be hit in the future if a new HW
3338 * generation exposes more than 64 queues. If so, the
3339 * definition of queue_mask needs updating */
3340 if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
3341 DRM_ERROR("Invalid KCQ enabled: %d\n", i);
3345 queue_mask |= (1ull << i);
3348 r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
3350 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3355 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
3356 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
3357 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
3358 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
3359 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
3360 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
3361 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
3362 amdgpu_ring_write(kiq_ring, 0); /* oac mask */
3363 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
3364 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3365 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
3366 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
3367 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3369 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
3370 /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
3371 amdgpu_ring_write(kiq_ring,
3372 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
3373 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
3374 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
3375 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
3376 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
3377 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
3378 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
3379 PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
3380 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
3381 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
3382 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
3383 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
3384 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
3385 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
3388 r = amdgpu_ring_test_helper(kiq_ring);
3390 DRM_ERROR("KCQ enable failed\n");
3395 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
3397 struct amdgpu_device *adev = ring->adev;
3398 struct v9_mqd *mqd = ring->mqd_ptr;
3399 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3402 mqd->header = 0xC0310800;
3403 mqd->compute_pipelinestat_enable = 0x00000001;
3404 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3405 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3406 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3407 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3408 mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
3409 mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
3410 mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
3411 mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
3412 mqd->compute_misc_reserved = 0x00000003;
3414 mqd->dynamic_cu_mask_addr_lo =
3415 lower_32_bits(ring->mqd_gpu_addr
3416 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3417 mqd->dynamic_cu_mask_addr_hi =
3418 upper_32_bits(ring->mqd_gpu_addr
3419 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3421 eop_base_addr = ring->eop_gpu_addr >> 8;
3422 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3423 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3425 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3426 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3427 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3428 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
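/*
 * Worked example: GFX9_MEC_HPD_SIZE is 4096 bytes, i.e. 1024 dwords, so
 * order_base_2(1024) - 1 = 9 is programmed here and the hardware sees
 * an EOP buffer of 2^(9+1) = 1024 dwords, matching the allocation.
 */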
3430 mqd->cp_hqd_eop_control = tmp;
3432 /* enable doorbell? */
3433 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3435 if (ring->use_doorbell) {
3436 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3437 DOORBELL_OFFSET, ring->doorbell_index);
3438 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3440 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3441 DOORBELL_SOURCE, 0);
3442 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3445 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3449 mqd->cp_hqd_pq_doorbell_control = tmp;
3451 /* disable the queue if it's active */
3453 mqd->cp_hqd_dequeue_request = 0;
3454 mqd->cp_hqd_pq_rptr = 0;
3455 mqd->cp_hqd_pq_wptr_lo = 0;
3456 mqd->cp_hqd_pq_wptr_hi = 0;
3458 /* set the pointer to the MQD */
3459 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3460 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3462 /* set MQD vmid to 0 */
3463 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3464 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3465 mqd->cp_mqd_control = tmp;
3467 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3468 hqd_gpu_addr = ring->gpu_addr >> 8;
3469 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3470 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3472 /* set up the HQD, this is similar to CP_RB0_CNTL */
3473 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3474 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3475 (order_base_2(ring->ring_size / 4) - 1));
3476 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3477 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
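/*
 * QUEUE_SIZE uses the same log2-minus-one encoding as EOP_SIZE above:
 * with a 64KB ring, for instance, ring_size / 4 = 16384 dwords and
 * order_base_2(16384) - 1 = 13 is programmed (64KB is only an
 * illustrative size; the real value is chosen at ring init).
 */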
3479 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3481 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3482 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
3483 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3484 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3485 mqd->cp_hqd_pq_control = tmp;
3487 /* set the wb address whether it's enabled or not */
3488 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3489 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3490 mqd->cp_hqd_pq_rptr_report_addr_hi =
3491 upper_32_bits(wb_gpu_addr) & 0xffff;
3493 /* only used if CP_PQ_WPTR_POLL_CNTL.EN = 1 */
3494 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3495 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3496 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3499 /* enable the doorbell if requested */
3500 if (ring->use_doorbell) {
3501 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3502 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3503 DOORBELL_OFFSET, ring->doorbell_index);
3505 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3507 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3508 DOORBELL_SOURCE, 0);
3509 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3513 mqd->cp_hqd_pq_doorbell_control = tmp;
3515 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3517 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3519 /* set the vmid for the queue */
3520 mqd->cp_hqd_vmid = 0;
3522 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3523 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3524 mqd->cp_hqd_persistent_state = tmp;
3526 /* set MIN_IB_AVAIL_SIZE */
3527 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3528 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3529 mqd->cp_hqd_ib_control = tmp;
3531 /* activate the queue */
3532 mqd->cp_hqd_active = 1;
3537 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
3539 struct amdgpu_device *adev = ring->adev;
3540 struct v9_mqd *mqd = ring->mqd_ptr;
3543 /* disable wptr polling */
3544 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3546 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3547 mqd->cp_hqd_eop_base_addr_lo);
3548 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3549 mqd->cp_hqd_eop_base_addr_hi);
3551 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3552 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
3553 mqd->cp_hqd_eop_control);
3555 /* enable doorbell? */
3556 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3557 mqd->cp_hqd_pq_doorbell_control);
3559 /* disable the queue if it's active */
3560 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3561 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3562 for (j = 0; j < adev->usec_timeout; j++) {
3563 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3567 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3568 mqd->cp_hqd_dequeue_request);
3569 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
3570 mqd->cp_hqd_pq_rptr);
3571 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3572 mqd->cp_hqd_pq_wptr_lo);
3573 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3574 mqd->cp_hqd_pq_wptr_hi);
3577 /* set the pointer to the MQD */
3578 WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
3579 mqd->cp_mqd_base_addr_lo);
3580 WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3581 mqd->cp_mqd_base_addr_hi);
3583 /* set MQD vmid to 0 */
3584 WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
3585 mqd->cp_mqd_control);
3587 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3588 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
3589 mqd->cp_hqd_pq_base_lo);
3590 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
3591 mqd->cp_hqd_pq_base_hi);
3593 /* set up the HQD, this is similar to CP_RB0_CNTL */
3594 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
3595 mqd->cp_hqd_pq_control);
3597 /* set the wb address whether it's enabled or not */
3598 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3599 mqd->cp_hqd_pq_rptr_report_addr_lo);
3600 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3601 mqd->cp_hqd_pq_rptr_report_addr_hi);
3603 /* only used if CP_PQ_WPTR_POLL_CNTL.EN = 1 */
3604 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3605 mqd->cp_hqd_pq_wptr_poll_addr_lo);
3606 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3607 mqd->cp_hqd_pq_wptr_poll_addr_hi);
3609 /* enable the doorbell if requested */
3610 if (ring->use_doorbell) {
3611 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3612 (adev->doorbell_index.kiq * 2) << 2);
3613 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3614 (adev->doorbell_index.userqueue_end * 2) << 2);
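/*
 * The indices in adev->doorbell_index count 64-bit doorbells, so the
 * multiply by 2 converts to a 32-bit slot index and the << 2 then
 * yields a byte offset, which is presumably the unit the
 * CP_MEC_DOORBELL_RANGE_* registers expect.
 */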
3617 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3618 mqd->cp_hqd_pq_doorbell_control);
3620 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3621 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3622 mqd->cp_hqd_pq_wptr_lo);
3623 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3624 mqd->cp_hqd_pq_wptr_hi);
3626 /* set the vmid for the queue */
3627 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3629 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3630 mqd->cp_hqd_persistent_state);
3632 /* activate the queue */
3633 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
3634 mqd->cp_hqd_active);
3636 if (ring->use_doorbell)
3637 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3642 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3644 struct amdgpu_device *adev = ring->adev;
3647 /* disable the queue if it's active */
3648 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3650 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3652 for (j = 0; j < adev->usec_timeout; j++) {
3653 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3658 if (j == adev->usec_timeout) {
3659 DRM_DEBUG("KIQ dequeue request failed.\n");
3661 /* Manual disable if dequeue request times out */
3662 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
3665 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3669 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3670 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3671 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3672 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3673 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3674 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3675 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3676 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3681 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3683 struct amdgpu_device *adev = ring->adev;
3684 struct v9_mqd *mqd = ring->mqd_ptr;
3685 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3687 gfx_v9_0_kiq_setting(ring);
3689 if (adev->in_gpu_reset) { /* for GPU_RESET case */
3690 /* reset MQD to a clean status */
3691 if (adev->gfx.mec.mqd_backup[mqd_idx])
3692 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3694 /* reset ring buffer */
3695 ring->wptr = 0;
3696 amdgpu_ring_clear_ring(ring);
3698 mutex_lock(&adev->srbm_mutex);
3699 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3700 gfx_v9_0_kiq_init_register(ring);
3701 soc15_grbm_select(adev, 0, 0, 0, 0);
3702 mutex_unlock(&adev->srbm_mutex);
3704 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3705 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3706 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3707 mutex_lock(&adev->srbm_mutex);
3708 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3709 gfx_v9_0_mqd_init(ring);
3710 gfx_v9_0_kiq_init_register(ring);
3711 soc15_grbm_select(adev, 0, 0, 0, 0);
3712 mutex_unlock(&adev->srbm_mutex);
3714 if (adev->gfx.mec.mqd_backup[mqd_idx])
3715 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3721 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3723 struct amdgpu_device *adev = ring->adev;
3724 struct v9_mqd *mqd = ring->mqd_ptr;
3725 int mqd_idx = ring - &adev->gfx.compute_ring[0];
3727 if (!adev->in_gpu_reset && !adev->in_suspend) {
3728 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3729 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3730 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3731 mutex_lock(&adev->srbm_mutex);
3732 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3733 gfx_v9_0_mqd_init(ring);
3734 soc15_grbm_select(adev, 0, 0, 0, 0);
3735 mutex_unlock(&adev->srbm_mutex);
3737 if (adev->gfx.mec.mqd_backup[mqd_idx])
3738 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3739 } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3740 /* reset MQD to a clean status */
3741 if (adev->gfx.mec.mqd_backup[mqd_idx])
3742 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3744 /* reset ring buffer */
3745 ring->wptr = 0;
3746 amdgpu_ring_clear_ring(ring);
3747 } else {
3748 amdgpu_ring_clear_ring(ring);
3754 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3756 struct amdgpu_ring *ring;
3759 ring = &adev->gfx.kiq.ring;
3761 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3762 if (unlikely(r != 0))
3765 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3766 if (unlikely(r != 0))
3769 gfx_v9_0_kiq_init_queue(ring);
3770 amdgpu_bo_kunmap(ring->mqd_obj);
3771 ring->mqd_ptr = NULL;
3772 amdgpu_bo_unreserve(ring->mqd_obj);
3773 ring->sched.ready = true;
3777 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3779 struct amdgpu_ring *ring = NULL;
3782 gfx_v9_0_cp_compute_enable(adev, true);
3784 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3785 ring = &adev->gfx.compute_ring[i];
3787 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3788 if (unlikely(r != 0))
3790 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3792 r = gfx_v9_0_kcq_init_queue(ring);
3793 amdgpu_bo_kunmap(ring->mqd_obj);
3794 ring->mqd_ptr = NULL;
3796 amdgpu_bo_unreserve(ring->mqd_obj);
3801 r = gfx_v9_0_kiq_kcq_enable(adev);
3806 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3809 struct amdgpu_ring *ring;
3811 if (!(adev->flags & AMD_IS_APU))
3812 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3814 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3815 if (adev->asic_type != CHIP_ARCTURUS) {
3816 /* legacy firmware loading */
3817 r = gfx_v9_0_cp_gfx_load_microcode(adev);
3822 r = gfx_v9_0_cp_compute_load_microcode(adev);
3827 r = gfx_v9_0_kiq_resume(adev);
3831 if (adev->asic_type != CHIP_ARCTURUS) {
3832 r = gfx_v9_0_cp_gfx_resume(adev);
3837 r = gfx_v9_0_kcq_resume(adev);
3841 if (adev->asic_type != CHIP_ARCTURUS) {
3842 ring = &adev->gfx.gfx_ring[0];
3843 r = amdgpu_ring_test_helper(ring);
3848 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3849 ring = &adev->gfx.compute_ring[i];
3850 amdgpu_ring_test_helper(ring);
3853 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3858 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3860 if (adev->asic_type != CHIP_ARCTURUS)
3861 gfx_v9_0_cp_gfx_enable(adev, enable);
3862 gfx_v9_0_cp_compute_enable(adev, enable);
3865 static int gfx_v9_0_hw_init(void *handle)
3868 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3870 if (!amdgpu_sriov_vf(adev))
3871 gfx_v9_0_init_golden_registers(adev);
3873 gfx_v9_0_constants_init(adev);
3875 r = gfx_v9_0_csb_vram_pin(adev);
3879 r = adev->gfx.rlc.funcs->resume(adev);
3883 r = gfx_v9_0_cp_resume(adev);
3887 if (adev->asic_type != CHIP_ARCTURUS) {
3888 r = gfx_v9_0_ngg_en(adev);
3896 static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
3899 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
3901 r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
3903 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3905 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3906 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
3908 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
3909 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
3910 PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
3911 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
3912 PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
3913 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
3914 amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
3915 amdgpu_ring_write(kiq_ring, 0);
3916 amdgpu_ring_write(kiq_ring, 0);
3917 amdgpu_ring_write(kiq_ring, 0);
3919 r = amdgpu_ring_test_helper(kiq_ring);
3921 DRM_ERROR("KCQ disable failed\n");
3926 static int gfx_v9_0_hw_fini(void *handle)
3928 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3930 amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
3931 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3932 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3934 /* disable KCQ so the CPC stops touching memory that is no longer valid */
3935 gfx_v9_0_kcq_disable(adev);
3937 if (amdgpu_sriov_vf(adev)) {
3938 gfx_v9_0_cp_gfx_enable(adev, false);
3939 /* polling must be disabled for SRIOV once hw is finished, otherwise
3940 * the CPC engine may keep fetching a WB address that is no longer
3941 * valid after sw has finished, triggering a DMAR read error on the
3942 * hypervisor side.
3943 */
3944 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3948 /* Use the deinitialize sequence from CAIL when unbinding the device
3949 * from the driver, otherwise KIQ will hang when binding it back.
3950 */
3951 if (!adev->in_gpu_reset && !adev->in_suspend) {
3952 mutex_lock(&adev->srbm_mutex);
3953 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
3954 adev->gfx.kiq.ring.pipe,
3955 adev->gfx.kiq.ring.queue, 0);
3956 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
3957 soc15_grbm_select(adev, 0, 0, 0, 0);
3958 mutex_unlock(&adev->srbm_mutex);
3961 gfx_v9_0_cp_enable(adev, false);
3962 adev->gfx.rlc.funcs->stop(adev);
3964 gfx_v9_0_csb_vram_unpin(adev);
3969 static int gfx_v9_0_suspend(void *handle)
3971 return gfx_v9_0_hw_fini(handle);
3974 static int gfx_v9_0_resume(void *handle)
3976 return gfx_v9_0_hw_init(handle);
3979 static bool gfx_v9_0_is_idle(void *handle)
3981 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3983 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3984 GRBM_STATUS, GUI_ACTIVE))
3990 static int gfx_v9_0_wait_for_idle(void *handle)
3993 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3995 for (i = 0; i < adev->usec_timeout; i++) {
3996 if (gfx_v9_0_is_idle(handle))
4003 static int gfx_v9_0_soft_reset(void *handle)
4005 u32 grbm_soft_reset = 0;
4007 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4010 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
4011 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4012 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4013 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4014 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4015 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4016 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
4017 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4018 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4019 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4020 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4023 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4024 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4025 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4029 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
4030 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4031 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4032 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4035 if (grbm_soft_reset) {
4037 adev->gfx.rlc.funcs->stop(adev);
4039 if (adev->asic_type != CHIP_ARCTURUS)
4040 /* Disable GFX parsing/prefetching */
4041 gfx_v9_0_cp_gfx_enable(adev, false);
4043 /* Disable MEC parsing/prefetching */
4044 gfx_v9_0_cp_compute_enable(adev, false);
4046 if (grbm_soft_reset) {
4047 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4048 tmp |= grbm_soft_reset;
4049 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4050 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4051 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4055 tmp &= ~grbm_soft_reset;
4056 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4057 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4060 /* Wait a little for things to settle down */
4066 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4070 mutex_lock(&adev->gfx.gpu_clock_mutex);
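/*
 * The write to RLC_CAPTURE_GPU_CLOCK_COUNT below is taken to latch the
 * free-running counter so the LSB/MSB halves read back as one coherent
 * sample; the mutex keeps concurrent callers from clobbering the shared
 * capture registers between the latch and the two reads.
 */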
4071 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4072 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
4073 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4074 mutex_unlock(&adev->gfx.gpu_clock_mutex);
4078 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4080 uint32_t gds_base, uint32_t gds_size,
4081 uint32_t gws_base, uint32_t gws_size,
4082 uint32_t oa_base, uint32_t oa_size)
4084 struct amdgpu_device *adev = ring->adev;
4087 gfx_v9_0_write_data_to_reg(ring, 0, false,
4088 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
4092 gfx_v9_0_write_data_to_reg(ring, 0, false,
4093 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
4097 gfx_v9_0_write_data_to_reg(ring, 0, false,
4098 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
4099 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4102 gfx_v9_0_write_data_to_reg(ring, 0, false,
4103 SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
4104 (1 << (oa_size + oa_base)) - (1 << oa_base));
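/*
 * (1 << (oa_size + oa_base)) - (1 << oa_base) builds a contiguous mask
 * of oa_size bits starting at bit oa_base; e.g. oa_base = 4, oa_size =
 * 2 gives 0x40 - 0x10 = 0x30, i.e. bits 4 and 5 set.
 */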
4107 static const u32 vgpr_init_compute_shader[] =
4109 0xb07c0000, 0xbe8000ff,
4110 0x000000f8, 0xbf110800,
4111 0x7e000280, 0x7e020280,
4112 0x7e040280, 0x7e060280,
4113 0x7e080280, 0x7e0a0280,
4114 0x7e0c0280, 0x7e0e0280,
4115 0x80808800, 0xbe803200,
4116 0xbf84fff5, 0xbf9c0000,
4117 0xd28c0001, 0x0001007f,
4118 0xd28d0001, 0x0002027e,
4119 0x10020288, 0xb8810904,
4120 0xb7814000, 0xd1196a01,
4121 0x00000301, 0xbe800087,
4122 0xbefc00c1, 0xd89c4000,
4123 0x00020201, 0xd89cc080,
4124 0x00040401, 0x320202ff,
4125 0x00000800, 0x80808100,
4126 0xbf84fff8, 0x7e020280,
4127 0xbf810000, 0x00000000,
4130 static const u32 sgpr_init_compute_shader[] =
4132 0xb07c0000, 0xbe8000ff,
4133 0x0000005f, 0xbee50080,
4134 0xbe812c65, 0xbe822c65,
4135 0xbe832c65, 0xbe842c65,
4136 0xbe852c65, 0xb77c0005,
4137 0x80808500, 0xbf84fff8,
4138 0xbe800080, 0xbf810000,
4141 static const struct soc15_reg_entry vgpr_init_regs[] = {
4142 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4143 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4144 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4145 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4146 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */
4147 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 },
4148 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 },
4149 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4150 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x100007f }, /* VGPRS=15 (256 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
4151 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
4154 static const struct soc15_reg_entry sgpr_init_regs[] = {
4155 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4156 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4157 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4158 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4159 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */
4160 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 },
4161 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 },
4162 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4163 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x340 }, /* SGPRS=13 (112 GPRS) */
4164 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4167 static const struct soc15_reg_entry sec_ded_counter_registers[] = {
4168 { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
4169 { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
4170 { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
4171 { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
4172 { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
4173 { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
4174 { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
4175 { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
4176 { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
4177 { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
4178 { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
4179 { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
4180 { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
4181 { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
4182 { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
4183 { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
4184 { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
4185 { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
4186 { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
4187 { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
4188 { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
4189 { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
4190 { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
4191 { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
4192 { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
4193 { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
4194 { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
4195 { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
4196 { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
4197 { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
4198 { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
4199 { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
4202 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
4204 struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4207 /* only support when RAS is enabled */
4208 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4211 r = amdgpu_ring_alloc(ring, 7);
4213 DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
4218 WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
4219 WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
4221 amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
4222 amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
4223 PACKET3_DMA_DATA_DST_SEL(1) |
4224 PACKET3_DMA_DATA_SRC_SEL(2) |
4225 PACKET3_DMA_DATA_ENGINE(0)));
4226 amdgpu_ring_write(ring, 0);
4227 amdgpu_ring_write(ring, 0);
4228 amdgpu_ring_write(ring, 0);
4229 amdgpu_ring_write(ring, 0);
4230 amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
4231 adev->gds.gds_size);
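/*
 * The DMA_DATA packet above has the CP fill all of GDS (gds_size bytes
 * at offset 0) with the zero data supplied in the packet; DST_SEL(1) is
 * understood to select GDS as the destination and SRC_SEL(2) immediate
 * data, with RAW_WAIT ordering the fill against prior traffic. These
 * field readings follow common PM4 usage rather than a quoted spec.
 */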
4233 amdgpu_ring_commit(ring);
4235 for (i = 0; i < adev->usec_timeout; i++) {
4236 if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
4241 if (i >= adev->usec_timeout)
4244 WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
4249 static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
4251 struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4252 struct amdgpu_ib ib;
4253 struct dma_fence *f = NULL;
4255 unsigned total_size, vgpr_offset, sgpr_offset;
4258 /* only support when RAS is enabled */
4259 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4262 /* bail if the compute ring is not ready */
4263 if (!ring->sched.ready)
4266 total_size =
4267 ((ARRAY_SIZE(vgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
4268 total_size +=
4269 ((ARRAY_SIZE(sgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
4270 total_size = ALIGN(total_size, 256);
4271 vgpr_offset = total_size;
4272 total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
4273 sgpr_offset = total_size;
4274 total_size += sizeof(sgpr_init_compute_shader);
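/*
 * Layout of the single IB allocation: the SET_SH_REG/DISPATCH command
 * stream comes first, then the two shader binaries at 256-byte-aligned
 * offsets. The alignment matters because COMPUTE_PGM_LO below is
 * programmed with the shader address shifted right by 8, so each shader
 * must start on a 256-byte boundary.
 */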
4276 /* allocate an indirect buffer to put the commands in */
4277 memset(&ib, 0, sizeof(ib));
4278 r = amdgpu_ib_get(adev, NULL, total_size, &ib);
4280 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
4284 /* load the compute shaders */
4285 for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
4286 ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
4288 for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
4289 ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
4291 /* init the ib length to 0 */
4292 ib.length_dw = 0;
4295 /* write the register state for the compute dispatch */
4296 for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i++) {
4297 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4298 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i])
4299 - PACKET3_SET_SH_REG_START;
4300 ib.ptr[ib.length_dw++] = vgpr_init_regs[i].reg_value;
4302 /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4303 gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
4304 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4305 ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4306 - PACKET3_SET_SH_REG_START;
4307 ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4308 ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4310 /* write dispatch packet */
4311 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4312 ib.ptr[ib.length_dw++] = 128; /* x */
4313 ib.ptr[ib.length_dw++] = 1; /* y */
4314 ib.ptr[ib.length_dw++] = 1; /* z */
4315 ib.ptr[ib.length_dw++] =
4316 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4318 /* write CS partial flush packet */
4319 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4320 ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4323 /* write the register state for the compute dispatch */
4324 for (i = 0; i < ARRAY_SIZE(sgpr_init_regs); i++) {
4325 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4326 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr_init_regs[i])
4327 - PACKET3_SET_SH_REG_START;
4328 ib.ptr[ib.length_dw++] = sgpr_init_regs[i].reg_value;
4330 /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4331 gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4332 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4333 ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4334 - PACKET3_SET_SH_REG_START;
4335 ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4336 ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4338 /* write dispatch packet */
4339 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4340 ib.ptr[ib.length_dw++] = 128; /* x */
4341 ib.ptr[ib.length_dw++] = 1; /* y */
4342 ib.ptr[ib.length_dw++] = 1; /* z */
4343 ib.ptr[ib.length_dw++] =
4344 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4346 /* write CS partial flush packet */
4347 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4348 ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4350 /* schedule the ib on the ring */
4351 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
4353 DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
4357 /* wait for the GPU to finish processing the IB */
4358 r = dma_fence_wait(f, false);
4360 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
4364 /* read back registers to clear the counters */
4365 mutex_lock(&adev->grbm_idx_mutex);
4366 for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
4367 for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
4368 for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
4369 gfx_v9_0_select_se_sh(adev, j, 0x0, k);
4370 RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
4374 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
4375 mutex_unlock(&adev->grbm_idx_mutex);
4378 amdgpu_ib_free(adev, &ib, NULL);
4384 static int gfx_v9_0_early_init(void *handle)
4386 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4388 if (adev->asic_type == CHIP_ARCTURUS)
4389 adev->gfx.num_gfx_rings = 0;
4391 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
4392 adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
4393 gfx_v9_0_set_ring_funcs(adev);
4394 gfx_v9_0_set_irq_funcs(adev);
4395 gfx_v9_0_set_gds_init(adev);
4396 gfx_v9_0_set_rlc_funcs(adev);
4401 static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
4402 struct ras_err_data *err_data,
4403 struct amdgpu_iv_entry *entry);
4405 static int gfx_v9_0_ecc_late_init(void *handle)
4407 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4408 struct ras_ih_if ih_info = {
4409 .cb = gfx_v9_0_process_ras_data_cb,
4413 r = amdgpu_gfx_ras_late_init(adev, &ih_info);
4417 r = gfx_v9_0_do_edc_gds_workarounds(adev);
4421 /* requires IBs so do in late init after IB pool is initialized */
4422 r = gfx_v9_0_do_edc_gpr_workarounds(adev);
4429 static int gfx_v9_0_late_init(void *handle)
4431 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4434 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4438 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4442 r = gfx_v9_0_ecc_late_init(handle);
4449 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
4451 uint32_t rlc_setting;
4453 /* if RLC is not enabled, do nothing */
4454 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
4455 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
4461 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
4466 data = RLC_SAFE_MODE__CMD_MASK;
4467 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4468 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4470 /* wait for RLC_SAFE_MODE */
4471 for (i = 0; i < adev->usec_timeout; i++) {
4472 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4478 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
4482 data = RLC_SAFE_MODE__CMD_MASK;
4483 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4486 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
4489 amdgpu_gfx_rlc_enter_safe_mode(adev);
4491 if (is_support_sw_smu(adev) && !enable)
4492 smu_set_gfx_cgpg(&adev->smu, enable);
4494 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
4495 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
4496 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4497 gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
4499 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
4500 gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
4503 amdgpu_gfx_rlc_exit_safe_mode(adev);
4506 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
4509 /* TODO: double check whether this needs to run under safe mode */
4510 /* gfx_v9_0_enter_rlc_safe_mode(adev); */
4512 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
4513 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
4515 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
4517 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
4518 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
4520 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
4522 /* gfx_v9_0_exit_rlc_safe_mode(adev); */
4525 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4530 amdgpu_gfx_rlc_enter_safe_mode(adev);
4532 /* It is disabled by HW by default */
4533 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4534 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4535 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4537 if (adev->asic_type != CHIP_VEGA12)
4538 data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4540 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4541 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4542 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4544 /* only for Vega10 & Raven1 */
4545 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4548 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4550 /* MGLS is a global flag to control all MGLS in GFX */
4551 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4552 /* 2 - RLC memory Light sleep */
4553 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4554 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4555 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4557 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4559 /* 3 - CP memory Light sleep */
4560 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4561 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4562 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4564 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4568 /* 1 - MGCG_OVERRIDE */
4569 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4571 if (adev->asic_type != CHIP_VEGA12)
4572 data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4574 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4575 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4576 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4577 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4580 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4582 /* 2 - disable MGLS in RLC */
4583 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4584 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4585 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4586 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4589 /* 3 - disable MGLS in CP */
4590 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4591 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4592 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4593 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4597 amdgpu_gfx_rlc_exit_safe_mode(adev);
4600 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
4605 if (adev->asic_type == CHIP_ARCTURUS)
4608 amdgpu_gfx_rlc_enter_safe_mode(adev);
4610 /* Enable 3D CGCG/CGLS */
4611 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
4612 /* write cmd to clear cgcg/cgls ov */
4613 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4614 /* unset CGCG override */
4615 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4616 /* update CGCG and CGLS override bits */
4618 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4620 /* enable 3D CGCG FSM (0x0000363f) */
4621 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4623 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4624 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4625 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4626 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4627 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
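/*
 * Sanity check on the 0x0000363f quoted above: assuming
 * CGCG_GFX_IDLE_THRESHOLD occupies bits [15:8] and
 * CGLS_REP_COMPANSAT_DELAY bits [7:2], with CGCG_EN/CGLS_EN at bits
 * 0/1, the value assembles as (0x36 << 8) | (0xF << 2) | 0x2 | 0x1 =
 * 0x363f. The bit positions are inferred from that target value.
 */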
4629 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4631 /* set IDLE_POLL_COUNT(0x00900100) */
4632 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4633 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4634 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4636 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4638 /* Disable CGCG/CGLS */
4639 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4640 /* disable cgcg, cgls should be disabled */
4641 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
4642 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
4643 /* disable cgcg and cgls in FSM */
4645 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4648 amdgpu_gfx_rlc_exit_safe_mode(adev);
4651 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4656 amdgpu_gfx_rlc_enter_safe_mode(adev);
4658 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
4659 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4660 /* unset CGCG override */
4661 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4662 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4663 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4665 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4666 /* update CGCG and CGLS override bits */
4668 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4670 /* enable CGCG FSM (0x0000363F) */
4671 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4673 if (adev->asic_type == CHIP_ARCTURUS)
4674 data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4675 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4677 data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4678 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4679 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4680 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4681 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4683 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4685 /* set IDLE_POLL_COUNT(0x00900100) */
4686 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4687 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4688 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4690 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4692 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4693 /* reset CGCG/CGLS bits */
4694 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4695 /* disable cgcg and cgls in FSM */
4697 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4700 amdgpu_gfx_rlc_exit_safe_mode(adev);
4703 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4707 /* CGCG/CGLS should be enabled after MGCG/MGLS
4708 * === MGCG + MGLS ===
4710 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
4711 /* === CGCG /CGLS for GFX 3D Only === */
4712 gfx_v9_0_update_3d_clock_gating(adev, enable);
4713 /* === CGCG + CGLS === */
4714 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
4716 /* CGCG/CGLS should be disabled before MGCG/MGLS
4717 * === CGCG + CGLS ===
4719 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
4720 /* === CGCG /CGLS for GFX 3D Only === */
4721 gfx_v9_0_update_3d_clock_gating(adev, enable);
4722 /* === MGCG + MGLS === */
4723 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
4728 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
4729 .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
4730 .set_safe_mode = gfx_v9_0_set_safe_mode,
4731 .unset_safe_mode = gfx_v9_0_unset_safe_mode,
4732 .init = gfx_v9_0_rlc_init,
4733 .get_csb_size = gfx_v9_0_get_csb_size,
4734 .get_csb_buffer = gfx_v9_0_get_csb_buffer,
4735 .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
4736 .resume = gfx_v9_0_rlc_resume,
4737 .stop = gfx_v9_0_rlc_stop,
4738 .reset = gfx_v9_0_rlc_reset,
4739 .start = gfx_v9_0_rlc_start
4742 static int gfx_v9_0_set_powergating_state(void *handle,
4743 enum amd_powergating_state state)
4745 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4746 bool enable = (state == AMD_PG_STATE_GATE);
4748 switch (adev->asic_type) {
4752 amdgpu_gfx_off_ctrl(adev, false);
4753 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
4755 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
4756 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
4757 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
4759 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
4760 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
4763 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
4764 gfx_v9_0_enable_cp_power_gating(adev, true);
4766 gfx_v9_0_enable_cp_power_gating(adev, false);
4768 /* update gfx cgpg state */
4769 if (is_support_sw_smu(adev) && enable)
4770 smu_set_gfx_cgpg(&adev->smu, enable);
4771 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
4773 /* update mgcg state */
4774 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
4777 amdgpu_gfx_off_ctrl(adev, true);
4781 amdgpu_gfx_off_ctrl(adev, false);
4782 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
4784 amdgpu_gfx_off_ctrl(adev, true);
4794 static int gfx_v9_0_set_clockgating_state(void *handle,
4795 enum amd_clockgating_state state)
4797 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4799 if (amdgpu_sriov_vf(adev))
4802 switch (adev->asic_type) {
4809 gfx_v9_0_update_gfx_clock_gating(adev,
4810 state == AMD_CG_STATE_GATE);
4818 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
4820 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4823 if (amdgpu_sriov_vf(adev))
4826 /* AMD_CG_SUPPORT_GFX_MGCG */
4827 data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4828 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
4829 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
4831 /* AMD_CG_SUPPORT_GFX_CGCG */
4832 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4833 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
4834 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
4836 /* AMD_CG_SUPPORT_GFX_CGLS */
4837 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
4838 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
4840 /* AMD_CG_SUPPORT_GFX_RLC_LS */
4841 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4842 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
4843 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
4845 /* AMD_CG_SUPPORT_GFX_CP_LS */
4846 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4847 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
4848 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
4850 if (adev->asic_type != CHIP_ARCTURUS) {
4851 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
4852 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4853 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
4854 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
4856 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
4857 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
4858 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
4862 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
4864 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
4867 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
4869 struct amdgpu_device *adev = ring->adev;
4872 /* XXX check if swapping is necessary on BE */
4873 if (ring->use_doorbell) {
4874 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
4876 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
4877 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
4883 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
4885 struct amdgpu_device *adev = ring->adev;
4887 if (ring->use_doorbell) {
4888 /* XXX check if swapping is necessary on BE */
4889 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4890 WDOORBELL64(ring->doorbell_index, ring->wptr);
4892 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
4893 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
4897 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
4899 struct amdgpu_device *adev = ring->adev;
4900 u32 ref_and_mask, reg_mem_engine;
4901 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
4903 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
4906 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
4909 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
4916 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
4917 reg_mem_engine = 1; /* pfp */
4920 gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
4921 adev->nbio.funcs->get_hdp_flush_req_offset(adev),
4922 adev->nbio.funcs->get_hdp_flush_done_offset(adev),
4923 ref_and_mask, ref_and_mask, 0x20);
4926 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
4927 struct amdgpu_job *job,
4928 struct amdgpu_ib *ib,
4931 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4932 u32 header, control = 0;
4934 if (ib->flags & AMDGPU_IB_FLAG_CE)
4935 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
4937 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
4939 control |= ib->length_dw | (vmid << 24);
4941 if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
4942 control |= INDIRECT_BUFFER_PRE_ENB(1);
4944 if (!(ib->flags & AMDGPU_IB_FLAG_CE))
4945 gfx_v9_0_ring_emit_de_meta(ring);
4948 amdgpu_ring_write(ring, header);
4949 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4950 amdgpu_ring_write(ring,
4954 lower_32_bits(ib->gpu_addr));
4955 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4956 amdgpu_ring_write(ring, control);
4959 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
4960 struct amdgpu_job *job,
4961 struct amdgpu_ib *ib,
4964 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4965 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
4967 /* Currently, there is a high possibility to get wave ID mismatch
4968 * between ME and GDS, leading to a hw deadlock, because ME generates
4969 * different wave IDs than the GDS expects. This situation happens
4970 * randomly when at least 5 compute pipes use GDS ordered append.
4971 * The wave IDs generated by ME are also wrong after suspend/resume.
4972 * Those are probably bugs somewhere else in the kernel driver.
4974 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
4975 * GDS to 0 for this ring (me/pipe).
4977 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
4978 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
4979 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
4980 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
4983 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
4984 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4985 amdgpu_ring_write(ring,
4989 lower_32_bits(ib->gpu_addr));
4990 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4991 amdgpu_ring_write(ring, control);
4994 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
4995 u64 seq, unsigned flags)
4997 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
4998 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
4999 bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
5001 /* RELEASE_MEM - flush caches, send int */
5002 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5003 amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
5004 EOP_TC_NC_ACTION_EN) :
5005 (EOP_TCL1_ACTION_EN |
5007 EOP_TC_WB_ACTION_EN |
5008 EOP_TC_MD_ACTION_EN)) |
5009 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5011 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
5014 * the address should be Qword aligned for a 64bit write, Dword
5015 * aligned when only the low 32 bits of data are sent (high discarded)
5021 amdgpu_ring_write(ring, lower_32_bits(addr));
5022 amdgpu_ring_write(ring, upper_32_bits(addr));
5023 amdgpu_ring_write(ring, lower_32_bits(seq));
5024 amdgpu_ring_write(ring, upper_32_bits(seq));
5025 amdgpu_ring_write(ring, 0);
5028 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5030 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5031 uint32_t seq = ring->fence_drv.sync_seq;
5032 uint64_t addr = ring->fence_drv.gpu_addr;
5034 gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
5035 lower_32_bits(addr), upper_32_bits(addr),
5036 seq, 0xffffffff, 4);
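/*
 * This stalls the fetch engine (PFP on gfx rings, ME on compute) until
 * the fence write-back location holds sync_seq, i.e. until everything
 * previously scheduled on this ring has signalled its fence.
 */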
5039 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5040 unsigned vmid, uint64_t pd_addr)
5042 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5044 /* compute doesn't have PFP */
5045 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5046 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5047 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5048 amdgpu_ring_write(ring, 0x0);
5052 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5054 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
5057 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5061 /* XXX check if swapping is necessary on BE */
5062 if (ring->use_doorbell)
5063 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
5069 static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
5072 struct amdgpu_device *adev = ring->adev;
5073 int pipe_num, tmp, reg;
5074 int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
5076 pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
5078 /* first me only has 2 entries, GFX and HP3D */
5082 reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
5084 tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
5088 static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
5089 struct amdgpu_ring *ring,
5094 struct amdgpu_ring *iring;
5096 mutex_lock(&adev->gfx.pipe_reserve_mutex);
5097 pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
5099 set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
5101 clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
5103 if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
5104 /* Clear all reservations - everyone reacquires all resources */
5105 for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
5106 gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
5109 for (i = 0; i < adev->gfx.num_compute_rings; ++i)
5110 gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
5113 /* Lower all pipes without a current reservation */
5114 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
5115 iring = &adev->gfx.gfx_ring[i];
5116 pipe = amdgpu_gfx_mec_queue_to_bit(adev,
5120 reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
5121 gfx_v9_0_ring_set_pipe_percent(iring, reserve);
5124 for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
5125 iring = &adev->gfx.compute_ring[i];
5126 pipe = amdgpu_gfx_mec_queue_to_bit(adev,
5130 reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
5131 gfx_v9_0_ring_set_pipe_percent(iring, reserve);
5135 mutex_unlock(&adev->gfx.pipe_reserve_mutex);
5138 static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
5139 struct amdgpu_ring *ring,
5142 uint32_t pipe_priority = acquire ? 0x2 : 0x0;
5143 uint32_t queue_priority = acquire ? 0xf : 0x0;
5145 mutex_lock(&adev->srbm_mutex);
5146 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
5148 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
5149 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
5151 soc15_grbm_select(adev, 0, 0, 0, 0);
5152 mutex_unlock(&adev->srbm_mutex);
5155 static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
5156 enum drm_sched_priority priority)
5158 struct amdgpu_device *adev = ring->adev;
5159 bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
5161 if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
5164 gfx_v9_0_hqd_set_priority(adev, ring, acquire);
5165 gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
5168 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5170 struct amdgpu_device *adev = ring->adev;
5172 /* XXX check if swapping is necessary on BE */
5173 if (ring->use_doorbell) {
5174 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
5175 WDOORBELL64(ring->doorbell_index, ring->wptr);
5177 BUG(); /* only DOORBELL method supported on gfx9 now */
5181 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5182 u64 seq, unsigned int flags)
5184 struct amdgpu_device *adev = ring->adev;
5186 /* we only allocate 32bit for each seq wb address */
5187 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5189 /* write fence seq to the "addr" */
5190 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5191 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5192 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5193 amdgpu_ring_write(ring, lower_32_bits(addr));
5194 amdgpu_ring_write(ring, upper_32_bits(addr));
5195 amdgpu_ring_write(ring, lower_32_bits(seq));
5197 if (flags & AMDGPU_FENCE_FLAG_INT) {
5198 /* set register to trigger INT */
5199 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5200 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5201 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5202 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
5203 amdgpu_ring_write(ring, 0);
5204 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5208 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
5210 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
5211 amdgpu_ring_write(ring, 0);
5214 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
5216 struct v9_ce_ib_state ce_payload = {0};
5220 cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
5221 csa_addr = amdgpu_csa_vaddr(ring->adev);
5223 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5224 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
5225 WRITE_DATA_DST_SEL(8) |
5227 WRITE_DATA_CACHE_POLICY(0));
5228 amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5229 amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5230 amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
	struct v9_de_ib_state de_payload = {0};
	uint64_t csa_addr, gds_addr;
	int cnt;

	csa_addr = amdgpu_csa_vaddr(ring->adev);
	gds_addr = csa_addr + 4096;
	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);

	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}

static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* 0 = frame_begin, 1 = frame_end */
}

static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	if (amdgpu_sriov_vf(ring->adev))
		gfx_v9_0_ring_emit_ce_meta(ring);

	gfx_v9_0_ring_emit_tmz(ring, true);

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;

		/* set load_ce_ram if preamble presented */
		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
			dw2 |= 0x10000000;
	} else {
		/*
		 * still load_ce_ram if this is the first time the preamble is
		 * presented, even though no context switch happens.
		 */
		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
			dw2 |= 0x10000000;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}

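/*
 * Worked example (illustrative, not from the original source): for a
 * submission with flags = AMDGPU_HAVE_CTX_SWITCH | AMDGPU_PREAMBLE_IB_PRESENT,
 * the value assembled above is
 *
 *	dw2 = 0x80000000	(load_enable)
 *	    | 0x00008001	(load_global_config & load_global_uconfig)
 *	    | 0x01000000	(load_cs_sh_regs)
 *	    | 0x00010002	(load_per_context_state & load_gfx_sh_regs)
 *	    | 0x10000000	(load_ce_ram)
 *	    = 0x91018003
 *
 * so the CONTEXT_CONTROL packet asks the CP to reload every state group.
 */
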
static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr == 0 */
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
	return ret;
}

static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr & ring->buf_mask) - 1;
	if (likely(cur > offset))
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}

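/*
 * Worked example (illustrative, not from the original source): on a ring of
 * 0x4000 dwords (ring_size >> 2 == 0x4000, buf_mask == 0x3fff), patching a
 * COND_EXEC at offset 0x3ffe after wptr has wrapped to 0x0005 gives
 * cur = 0x0004, so the else branch stores 0x4000 - 0x3ffe + 0x0004 = 0x0006
 * over the 0x55aa55aa dummy: the CP skips six dwords across the wrap point
 * to the current write position.
 */
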
static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register */
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
}

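/*
 * Usage sketch (illustrative, assuming the SR-IOV KIQ register-read path):
 * once the COPY_DATA packet above retires, the register value lands in the
 * writeback page, and the host can pick it up without an MMIO read, e.g.
 *
 *	val = adev->wb.wb[adev->virt.reg_val_offs];
 *
 * which is how amdgpu_virt_kiq_rreg() consumes this helper.
 */
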
static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				    uint32_t val)
{
	uint32_t cmd = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = (1 << 16); /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}

static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						  uint32_t reg0, uint32_t reg1,
						  uint32_t ref, uint32_t mask)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	struct amdgpu_device *adev = ring->adev;
	bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
			     adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;

	if (fw_version_ok)
		gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
				      ref, mask, 0x20);
	else
		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
							   ref, mask);
}

static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t value = 0;

	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
	WREG32_SOC15(GC, 0, mmSQ_CMD, value);
}

static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       TIME_STAMP_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}
}

static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */

	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
			break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
			break;
		case 2:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
			break;
		case 3:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}

static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_REG_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_INSTR_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

#define ENABLE_ECC_ON_ME_PIPE(me, pipe)				\
	WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
		       CP_ECC_ERROR_INT_ENABLE, 1)

#define DISABLE_ECC_ON_ME_PIPE(me, pipe)			\
	WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
		       CP_ECC_ERROR_INT_ENABLE, 0)

static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned type,
					   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       CP_ECC_ERROR_INT_ENABLE, 0);
		DISABLE_ECC_ON_ME_PIPE(1, 0);
		DISABLE_ECC_ON_ME_PIPE(1, 1);
		DISABLE_ECC_ON_ME_PIPE(1, 2);
		DISABLE_ECC_ON_ME_PIPE(1, 3);
		break;

	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       CP_ECC_ERROR_INT_ENABLE, 1);
		ENABLE_ECC_ON_ME_PIPE(1, 0);
		ENABLE_ECC_ON_ME_PIPE(1, 1);
		ENABLE_ECC_ON_ME_PIPE(1, 2);
		ENABLE_ECC_ON_ME_PIPE(1, 3);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
		gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/*
			 * Per-queue interrupt is supported for MEC starting
			 * from VI, but the interrupt can only be enabled or
			 * disabled per pipe instead of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}

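/*
 * Worked example (illustrative, not from the original source): an IV entry
 * with ring_id = 0x25 (0b0100101) decodes above as
 *
 *	me_id    = (0x25 & 0x0c) >> 2 = 1	(first compute MEC)
 *	pipe_id  = (0x25 & 0x03) >> 0 = 1
 *	queue_id = (0x25 & 0x70) >> 4 = 2
 *
 * so only the compute ring bound to MEC1/pipe1/queue2 gets its fences
 * processed.
 */
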
static void gfx_v9_0_fault(struct amdgpu_device *adev,
			   struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	}
}

static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v9_0_fault(adev, entry);
	return 0;
}

static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v9_0_fault(adev, entry);
	return 0;
}

static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
		struct ras_err_data *err_data,
		struct amdgpu_iv_entry *entry)
{
	/* TODO ue will trigger an interrupt. */
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
		if (adev->gfx.funcs->query_ras_error_count)
			adev->gfx.funcs->query_ras_error_count(adev, err_data);
		amdgpu_ras_reset_gpu(adev, 0);
	}
	return AMDGPU_RAS_SUCCESS;
}

static const struct {
	const char *name;
	uint32_t ip;
	uint32_t inst;
	uint32_t seg;
	uint32_t reg_offset;
	uint32_t per_se_instance;
	int32_t num_instance;
	uint32_t sec_count_mask;
	uint32_t ded_count_mask;
} gfx_ras_edc_regs[] = {
	{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1,
	  REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
	  REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, DED_COUNT) },
	{ "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1,
	  REG_FIELD_MASK(CPC_EDC_UCODE_CNT, SEC_COUNT),
	  REG_FIELD_MASK(CPC_EDC_UCODE_CNT, DED_COUNT) },
	{ "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1,
	  REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME1), 0 },
	{ "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1,
	  REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME2), 0 },
	{ "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1,
	  REG_FIELD_MASK(CPF_EDC_TAG_CNT, SEC_COUNT),
	  REG_FIELD_MASK(CPF_EDC_TAG_CNT, DED_COUNT) },
	{ "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1,
	  REG_FIELD_MASK(CPG_EDC_DMA_CNT, ROQ_COUNT), 0 },
	{ "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1,
	  REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
	  REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_DED_COUNT) },
	{ "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1,
	  REG_FIELD_MASK(CPG_EDC_TAG_CNT, SEC_COUNT),
	  REG_FIELD_MASK(CPG_EDC_TAG_CNT, DED_COUNT) },
	{ "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1,
	  REG_FIELD_MASK(DC_EDC_CSINVOC_CNT, COUNT_ME1), 0 },
	{ "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1,
	  REG_FIELD_MASK(DC_EDC_RESTORE_CNT, COUNT_ME1), 0 },
	{ "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1,
	  REG_FIELD_MASK(DC_EDC_STATE_CNT, COUNT_ME1), 0 },
	{ "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1,
	  REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_SEC),
	  REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_DED) },
	{ "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1,
	  REG_FIELD_MASK(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED), 0 },
	{ "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
	  0, 1, REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
	  REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) },
	{ "GDS_OA_PHY_PHY_CMD_RAM_MEM",
	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1,
	  REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
	  REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) },
	{ "GDS_OA_PHY_PHY_DATA_RAM_MEM",
	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1,
	  REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED), 0 },
	{ "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
	  REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
	  REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) },
	{ "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
	  REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
	  REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) },
	{ "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
	  REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
	  REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) },
	{ "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
	  REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
	  REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) },
	{ "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 1, 1,
	  REG_FIELD_MASK(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT), 0 },
	{ "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
	  REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
	  REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) },
	{ "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
	  REG_FIELD_MASK(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT), 0 },
	{ "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
	  REG_FIELD_MASK(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT), 0 },
	{ "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
	  REG_FIELD_MASK(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT), 0 },
	{ "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
	  REG_FIELD_MASK(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT), 0 },
	{ "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2,
	  REG_FIELD_MASK(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT), 0 },
	{ "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2,
	  REG_FIELD_MASK(TCA_EDC_CNT, REQ_FIFO_SED_COUNT), 0 },
	{ "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
	  REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
	  REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) },
	{ "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
	  REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
	  REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) },
	{ "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
	  REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
	  REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) },
	{ "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
	  REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
	  REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) },
	{ "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
	  REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
	  REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) },
	{ "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
	  REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT), 0 },
	{ "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
	  REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT), 0 },
	{ "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
	  REG_FIELD_MASK(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT), 0 },
	{ "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
	  REG_FIELD_MASK(TCC_EDC_CNT, RETURN_DATA_SED_COUNT), 0 },
	{ "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
	  REG_FIELD_MASK(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT), 0 },
	{ "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
	  REG_FIELD_MASK(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT), 0 },
	{ "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16,
	  REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT), 0 },
	{ "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16,
	  REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT), 0 },
	{ "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
	  16, REG_FIELD_MASK(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT), 0 },
	{ "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
	  0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
	  0 },
	{ "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
	  16, REG_FIELD_MASK(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT), 0 },
	{ "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
	  0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
	  0 },
	{ "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
	  16, REG_FIELD_MASK(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT), 0 },
	{ "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 72,
	  REG_FIELD_MASK(TCI_EDC_CNT, WRITE_RAM_SED_COUNT), 0 },
	{ "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
	  REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
	  REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) },
	{ "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
	  REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
	  REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) },
	{ "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
	  REG_FIELD_MASK(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT), 0 },
	{ "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
	  REG_FIELD_MASK(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), 0 },
	{ "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
	  REG_FIELD_MASK(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0 },
	{ "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
	  REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
	  REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) },
	{ "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
	  REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
	  REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) },
	{ "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
	  REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
	  REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) },
	{ "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
	  REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
	  REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) },
	{ "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
	  REG_FIELD_MASK(TD_EDC_CNT, CS_FIFO_SED_COUNT), 0 },
	{ "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
	  REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_SEC_COUNT),
	  REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_DED_COUNT) },
	{ "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
	  REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_SEC_COUNT),
	  REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_DED_COUNT) },
	{ "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
	  REG_FIELD_MASK(SQ_EDC_CNT, SGPR_SEC_COUNT),
	  REG_FIELD_MASK(SQ_EDC_CNT, SGPR_DED_COUNT) },
	{ "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
	  REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_SEC_COUNT),
	  REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_DED_COUNT) },
	{ "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
	  REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_SEC_COUNT),
	  REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_DED_COUNT) },
	{ "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
	  REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_SEC_COUNT),
	  REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_DED_COUNT) },
	{ "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
	  REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_SEC_COUNT),
	  REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_DED_COUNT) },
	{ "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
	  1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
	  REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) },
	{ "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
	  REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) },
	{ "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
	  1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
	  REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) },
	{ "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
	  REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) },
	{ "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
	  1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
	  REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) },
	{ "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
	  REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) },
	{ "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
	  REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) },
	{ "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
	  REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) },
	{ "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
	  REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) },
	{ "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
	  REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) },
	{ "SQC_INST_BANKA_UTCL1_MISS_FIFO",
	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
	  REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
	  0 },
	{ "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT), 0 },
	{ "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT), 0 },
	{ "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT), 0 },
	{ "SQC_DATA_BANKA_DIRTY_BIT_RAM",
	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
	  REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT), 0 },
	{ "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
	  REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
	  REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) },
	{ "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
	  REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) },
	{ "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
	  REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) },
	{ "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
	  REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) },
	{ "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
	  REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) },
	{ "SQC_INST_BANKB_UTCL1_MISS_FIFO",
	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6,
	  REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
	  0 },
	{ "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT), 0 },
	{ "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT), 0 },
	{ "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
	  6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT), 0 },
	{ "SQC_DATA_BANKB_DIRTY_BIT_RAM",
	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6,
	  REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT), 0 },
	{ "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
	  REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) },
	{ "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
	  REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) },
	{ "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
	  REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) },
	{ "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
	  REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) },
	{ "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
	  REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) },
	{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0 },
	{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0 },
	{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0 },
	{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0 },
	{ "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0 },
	{ "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
	  REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) },
	{ "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
	  REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) },
	{ "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
	  REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) },
	{ "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0 },
	{ "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0 },
	{ "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), 0 },
	{ "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), 0 },
	{ "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), 0 },
	{ "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
	  REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), 0 },
};

static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
				     void *inject_if)
{
	struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
	int ret;
	struct ta_ras_trigger_error_input block_info = { 0 };

	if (adev->asic_type != CHIP_VEGA20)
		return -EINVAL;

	if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
		return -EINVAL;

	if (!ras_gfx_subblocks[info->head.sub_block_index].name)
		return -EPERM;

	if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
	      info->head.type)) {
		DRM_ERROR("GFX Subblock %s, hardware does not support type 0x%x\n",
			  ras_gfx_subblocks[info->head.sub_block_index].name,
			  info->head.type);
		return -EPERM;
	}

	if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
	      info->head.type)) {
		DRM_ERROR("GFX Subblock %s, driver does not support type 0x%x\n",
			  ras_gfx_subblocks[info->head.sub_block_index].name,
			  info->head.type);
		return -EPERM;
	}

	block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
	block_info.sub_block_index =
		ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
	block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
	block_info.address = info->address;
	block_info.value = info->value;

	mutex_lock(&adev->grbm_idx_mutex);
	ret = psp_ras_trigger_error(&adev->psp, &block_info);
	mutex_unlock(&adev->grbm_idx_mutex);

	return ret;
}

static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
					  void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	uint32_t sec_count, ded_count;
	uint32_t i;
	uint32_t reg_value;
	uint32_t se_id, instance_id;

	if (adev->asic_type != CHIP_VEGA20)
		return -EINVAL;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (se_id = 0; se_id < adev->gfx.config.max_shader_engines; se_id++) {
		for (instance_id = 0; instance_id < 256; instance_id++) {
			for (i = 0; i < ARRAY_SIZE(gfx_ras_edc_regs); i++) {
				if (se_id != 0 &&
				    !gfx_ras_edc_regs[i].per_se_instance)
					continue;
				if (instance_id >= gfx_ras_edc_regs[i].num_instance)
					continue;

				gfx_v9_0_select_se_sh(adev, se_id, 0,
						      instance_id);

				reg_value = RREG32(
					adev->reg_offset[gfx_ras_edc_regs[i].ip]
						[gfx_ras_edc_regs[i].inst]
						[gfx_ras_edc_regs[i].seg] +
					gfx_ras_edc_regs[i].reg_offset);
				sec_count = reg_value &
					    gfx_ras_edc_regs[i].sec_count_mask;
				ded_count = reg_value &
					    gfx_ras_edc_regs[i].ded_count_mask;
				if (sec_count) {
					DRM_INFO(
						"Instance[%d][%d]: SubBlock %s, SEC %d\n",
						se_id, instance_id,
						gfx_ras_edc_regs[i].name,
						sec_count);
					err_data->ce_count++;
				}

				if (ded_count) {
					DRM_INFO(
						"Instance[%d][%d]: SubBlock %s, DED %d\n",
						se_id, instance_id,
						gfx_ras_edc_regs[i].name,
						ded_count);
					err_data->ue_count++;
				}
			}
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

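/*
 * Reading note (illustrative, not from the original source): entries with
 * per_se_instance == 0 are global, so the loop above reads them only on the
 * se_id == 0 pass; per-SE entries such as the TA/TCP/SQ counters are re-read
 * for every shader engine, with GRBM targeting each instance_id below the
 * entry's num_instance in turn.
 */
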
static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gfx.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	DRM_ERROR("CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
	.name = "gfx_v9_0",
	.early_init = gfx_v9_0_early_init,
	.late_init = gfx_v9_0_late_init,
	.sw_init = gfx_v9_0_sw_init,
	.sw_fini = gfx_v9_0_sw_fini,
	.hw_init = gfx_v9_0_hw_init,
	.hw_fini = gfx_v9_0_hw_fini,
	.suspend = gfx_v9_0_suspend,
	.resume = gfx_v9_0_resume,
	.is_idle = gfx_v9_0_is_idle,
	.wait_for_idle = gfx_v9_0_wait_for_idle,
	.soft_reset = gfx_v9_0_soft_reset,
	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
	.set_powergating_state = gfx_v9_0_set_powergating_state,
	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 242 maximum if 16 IBs */
		5 +  /* COND_EXEC */
		7 +  /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 +  /* VM_FLUSH */
		8 +  /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 +  /* double SWITCH_BUFFER,
		      * the first COND_EXEC jumps to the place just
		      * prior to this double SWITCH_BUFFER
		      */
		5 +  /* COND_EXEC */
		7 +  /* HDP_flush */
		4 +  /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 +  /* CNTX_CTRL */
		5 +  /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		2, /* SWITCH_BUFFER */
	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v9_ring_emit_sb,
	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
	.emit_tmz = gfx_v9_0_ring_emit_tmz,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v9_0_ring_soft_recovery,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.set_priority = gfx_v9_0_ring_set_priority_compute,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
	.test_ring = gfx_v9_0_ring_test_ring,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v9_0_ring_emit_rreg,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
	.set = gfx_v9_0_set_eop_interrupt_state,
	.process = gfx_v9_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
	.set = gfx_v9_0_set_priv_reg_fault_state,
	.process = gfx_v9_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
	.set = gfx_v9_0_set_priv_inst_fault_state,
	.process = gfx_v9_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
	.set = gfx_v9_0_set_cp_ecc_error_state,
	.process = gfx_v9_0_cp_ecc_error_irq,
};

static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;

	adev->gfx.cp_ecc_error_irq.num_types = 2; /* C5 ECC error and C9 FUE error */
	adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
}

static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
		break;
	default:
		break;
	}
}

static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->gds.gds_size = 0x10000;
		break;
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
		adev->gds.gds_size = 0x1000;
		break;
	default:
		adev->gds.gds_size = 0x10000;
		break;
	}

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
		adev->gds.gds_compute_max_wave_id = 0x7ff;
		break;
	case CHIP_VEGA12:
		adev->gds.gds_compute_max_wave_id = 0x27f;
		break;
	case CHIP_RAVEN:
		if (adev->rev_id >= 0x8)
			adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
		else
			adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
		break;
	case CHIP_ARCTURUS:
		adev->gds.gds_compute_max_wave_id = 0xfff;
		break;
	default:
		/* this really depends on the chip */
		adev->gds.gds_compute_max_wave_id = 0x7ff;
		break;
	}

	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}

static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}

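/*
 * Worked example (illustrative, hypothetical values): with
 * max_cu_per_sh = 11 the mask is 0x7ff; if the combined INACTIVE_CUS field
 * reads 0x500 (CUs 8 and 10 fused off or user-disabled), the function
 * returns (~0x500) & 0x7ff = 0x2ff, i.e. CUs 0-7 and 9 are active.
 */
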
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	unsigned disable_masks[4 * 4];

	if (!adev || !cu_info)
		return -EINVAL;

	/*
	 * 16 comes from the 4 * 4 bitmap array size, which covers all
	 * supported gfx9 ASICs.
	 */
	if (adev->gfx.config.max_shader_engines *
	    adev->gfx.config.max_sh_per_se > 16)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks,
				    adev->gfx.config.max_shader_engines,
				    adev->gfx.config.max_sh_per_se);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			gfx_v9_0_set_user_cu_inactive_bitmap(
				adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);

			/*
			 * The bitmap (and ao_cu_bitmap) in the cu_info
			 * structure is a 4x4 array, which suits Vega ASICs
			 * with their 4*2 SE/SH layout.
			 * Arcturus changes the SE/SH layout to 8*1. To
			 * minimize the impact, the extra SEs are folded into
			 * the second column of the existing array:
			 *    SE4,SH0 --> bitmap[0][1]
			 *    SE5,SH0 --> bitmap[1][1]
			 *    SE6,SH0 --> bitmap[2][1]
			 *    SE7,SH0 --> bitmap[3][1]
			 */
			cu_info->bitmap[i % 4][j + i / 4] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < adev->gfx.config.max_cu_per_sh)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v9_0_ip_funcs,
};