/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"

#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"

#include "gfx_v9_4.h"

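/*
 * GFX9 exposes a single GFX ring. GFX9_MEC_HPD_SIZE appears to be the size
 * of the per-compute-queue HPD buffer used by the MEC firmware; the two RLC
 * addresses are fixed offsets used when loading the RLC microcode.
 */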
#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

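/*
 * The PWR and GCEA registers below are not declared in the generated
 * gc_9_0 headers, so their offsets and field definitions are kept locally.
 */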
#define mmPWR_MISC_CNTL_STATUS                                  0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX                         0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT        0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT          0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK          0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK            0x00000006L

#define mmGCEA_PROBE_MAP                        0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX               0

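/*
 * Firmware images for each supported ASIC. MODULE_FIRMWARE() records the
 * dependency so userspace tooling (e.g. initramfs generators) knows which
 * files from /lib/firmware/amdgpu the module may request.
 */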
MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");

MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin");
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");

MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");

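/*
 * Arcturus-specific TCP channel steering registers, likewise missing from
 * the common gc_9_0 headers.
 */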
#define mmTCP_CHAN_STEER_0_ARCT                 0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX        0
#define mmTCP_CHAN_STEER_1_ARCT                 0x0b04
#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX        0
#define mmTCP_CHAN_STEER_2_ARCT                 0x0b09
#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX        0
#define mmTCP_CHAN_STEER_3_ARCT                 0x0b0a
#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX        0
#define mmTCP_CHAN_STEER_4_ARCT                 0x0b0b
#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX        0
#define mmTCP_CHAN_STEER_5_ARCT                 0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX        0

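/*
 * GFX sub-block indices as understood by the PSP RAS TA. The *_INDEX_START
 * and *_INDEX_END markers delimit the sub-ranges used when translating an
 * amdgpu RAS sub-block into the TA's numbering.
 */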
enum ta_ras_gfx_subblock {
	/* CPC */
	TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
	TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
	TA_RAS_BLOCK__GFX_CPC_UCODE,
	TA_RAS_BLOCK__GFX_DC_STATE_ME1,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
	TA_RAS_BLOCK__GFX_DC_STATE_ME2,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	/* CPF */
	TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
	TA_RAS_BLOCK__GFX_CPF_TAG,
	TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
	/* CPG */
	TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
	TA_RAS_BLOCK__GFX_CPG_TAG,
	TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
	/* GDS */
	TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	/* SPI */
	TA_RAS_BLOCK__GFX_SPI_SR_MEM,
	/* SQ */
	TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_LDS_D,
	TA_RAS_BLOCK__GFX_SQ_LDS_I,
	TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP */
	TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
	/* SQC (3 ranges) */
	TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	/* SQC range 0 */
	TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
		TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	/* SQC range 1 */
	TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	/* SQC range 2 */
	TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
	/* TA */
	TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
	TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	/* TCA */
	TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	/* TCC (5 sub-ranges) */
	TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	/* TCC range 0 */
	TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
	TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	/* TCC range 1 */
	TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
		TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	/* TCC range 2 */
	TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
	TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
	TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
		TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	/* TCC range 3 */
	TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
		TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	/* TCC range 4 */
	TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
		TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
		TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
	/* TCI */
	TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
	/* TCP */
	TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
	TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
	TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
	TA_RAS_BLOCK__GFX_TCP_DB_RAM,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	/* TD */
	TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
	TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	/* EA (3 sub-ranges) */
	TA_RAS_BLOCK__GFX_EA_INDEX_START,
	/* EA range 0 */
	TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	/* EA range 1 */
	TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	/* EA range 2 */
	TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
	/* UTC VM L2 bank */
	TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
	/* UTC VM walker */
	TA_RAS_BLOCK__UTC_VML2_WALKER,
	/* UTC ATC L2 2MB cache */
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
	/* UTC ATC L2 4KB cache */
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
	TA_RAS_BLOCK__GFX_MAX
};

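/*
 * Maps each amdgpu GFX RAS sub-block onto its TA index along with bitmasks
 * of the error types supported in hardware (args a-d) and in software
 * (args e-h). AMDGPU_RAS_SUB_BLOCK() packs each group of four flags into
 * the low four bits of the corresponding field.
 */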
struct ras_gfx_subblock {
	unsigned char *name;
	int ta_subblock;
	int hw_supported_error_type;
	int sw_supported_error_type;
};

#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)                             \
	[AMDGPU_RAS_BLOCK__##subblock] = {                                     \
		#subblock,                                                     \
		TA_RAS_BLOCK__##subblock,                                      \
		((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)),                  \
		(((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),                  \
	}

static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
			     0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
			     0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
};

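/*
 * Golden register settings. Each SOC15_REG_GOLDEN_VALUE() entry is
 * (IP, instance, register, and_mask, or_mask); soc15_program_register_sequence()
 * clears the and_mask bits and ORs in or_mask, or writes or_mask outright
 * when and_mask is 0xffffffff.
 */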
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
};

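/* Registers that are accessed through the RLCG write path under SR-IOV. */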
static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
	{SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
	{SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
};

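/*
 * Offsets of RLC_SRM_INDEX_CNTL_ADDR/DATA_n relative to entry 0. The
 * deltas are spelled out per register rather than computed, presumably
 * because the registers are not guaranteed to be contiguous.
 */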
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

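/*
 * gfx_v9_0_rlcg_wreg - write a GC register through the RLCG interface
 *
 * Used under SR-IOV, where the guest cannot write most GC registers
 * directly. The value is placed in SCRATCH_REG0 and the register offset
 * (with bit 31 set as a "pending" flag) in SCRATCH_REG1; ringing
 * RLC_SPARE_INT then asks the RLC firmware to perform the write, and we
 * poll until the flag bit is cleared. GRBM_GFX_CNTL and GRBM_GFX_INDEX are
 * mirrored into SCRATCH_REG2/3 and written directly.
 */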
void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
{
	void __iomem *scratch_reg0;
	void __iomem *scratch_reg1;
	void __iomem *scratch_reg2;
	void __iomem *scratch_reg3;
	void __iomem *spare_int;
	uint32_t grbm_cntl;
	uint32_t grbm_idx;

	scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
	scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
	scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
	scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
	spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;

	grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
	grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;

	if (amdgpu_sriov_runtime(adev)) {
		pr_err("shouldn't call rlcg register write during SR-IOV runtime\n");
		return;
	}

	if (offset == grbm_cntl || offset == grbm_idx) {
		if (offset == grbm_cntl)
			writel(v, scratch_reg2);
		else if (offset == grbm_idx)
			writel(v, scratch_reg3);

		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else {
		uint32_t i = 0;
		uint32_t retries = 50000;

		writel(v, scratch_reg0);
		writel(offset | 0x80000000, scratch_reg1);
		writel(1, spare_int);
		for (i = 0; i < retries; i++) {
			u32 tmp;

			tmp = readl(scratch_reg1);
			if (!(tmp & 0x80000000))
				break;

			udelay(10);
		}
		if (i >= retries)
			pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
	}
}

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
					  void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
				     void *inject_if);
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);

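/*
 * KIQ (kernel interface queue) PM4 packet builders. The KIQ is a privileged
 * compute queue through which the driver maps, unmaps and queries the other
 * compute queues.
 */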
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
				uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
		PACKET3_SET_RESOURCES_VMID_MASK(0) |
		/* vmid_mask:0 queue_type:0 (KIQ) */
		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			 /* queue_type: normal compute queue */
			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			 /* alloc format: all_on_one_pipe */
			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			 /* num_queues: must be 1 */
			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* action, queue_sel:0, eng_sel, num_Q:1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring,
			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				uint16_t pasid, uint32_t flush_type,
				bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

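/*
 * The *_size fields below are the sizes of the corresponding PM4 packets
 * in dwords (header plus payload), used when reserving KIQ ring space.
 */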
static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_0_kiq_set_resources,
	.kiq_map_queues = gfx_v9_0_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_0_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
}

static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg10,
						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_VEGA12:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1,
						ARRAY_SIZE(golden_settings_gc_9_2_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1_vg12,
						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
		break;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg20,
						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
		break;
	case CHIP_ARCTURUS:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_4_1_arct,
						ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
						ARRAY_SIZE(golden_settings_gc_9_1));
		if (adev->rev_id >= 8)
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv2,
							ARRAY_SIZE(golden_settings_gc_9_1_rv2));
		else
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv1,
							ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	case CHIP_RENOIR:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_1_rn,
						ARRAY_SIZE(golden_settings_gc_9_1_rn));
		return; /* Renoir does not need the common golden settings */
	default:
		break;
	}

	if (adev->asic_type != CHIP_ARCTURUS)
		soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
						(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

989 static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
990 {
991         adev->gfx.scratch.num_reg = 8;
992         adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
993         adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
994 }
995
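/*
 * Emit a WRITE_DATA packet on @ring that writes @val to register @reg,
 * optionally requesting a write confirmation (@wc) and selecting which
 * CP engine (@eng_sel) performs the write.
 */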
996 static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
997                                        bool wc, uint32_t reg, uint32_t val)
998 {
999         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
1000         amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
1001                                 WRITE_DATA_DST_SEL(0) |
1002                                 (wc ? WR_CONFIRM : 0));
1003         amdgpu_ring_write(ring, reg);
1004         amdgpu_ring_write(ring, 0);
1005         amdgpu_ring_write(ring, val);
1006 }
1007
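/*
 * Emit a WAIT_REG_MEM packet that polls a register or memory location
 * (selected by @mem_space) until (value & @mask) == @ref, re-checking
 * every @inv poll interval. Memory addresses must be dword aligned.
 */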
1008 static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
1009                                   int mem_space, int opt, uint32_t addr0,
1010                                   uint32_t addr1, uint32_t ref, uint32_t mask,
1011                                   uint32_t inv)
1012 {
1013         amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
1014         amdgpu_ring_write(ring,
1015                                  /* memory (1) or register (0) */
1016                                  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
1017                                  WAIT_REG_MEM_OPERATION(opt) | /* wait */
1018                                  WAIT_REG_MEM_FUNCTION(3) |  /* equal */
1019                                  WAIT_REG_MEM_ENGINE(eng_sel)));
1020
1021         if (mem_space)
1022                 BUG_ON(addr0 & 0x3); /* Dword align */
1023         amdgpu_ring_write(ring, addr0);
1024         amdgpu_ring_write(ring, addr1);
1025         amdgpu_ring_write(ring, ref);
1026         amdgpu_ring_write(ring, mask);
1027         amdgpu_ring_write(ring, inv); /* poll interval */
1028 }
1029
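/*
 * Basic ring liveness test: push a packet that writes 0xDEADBEEF to a
 * scratch register and poll until the value shows up, or time out.
 */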
1030 static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
1031 {
1032         struct amdgpu_device *adev = ring->adev;
1033         uint32_t scratch;
1034         uint32_t tmp = 0;
1035         unsigned i;
1036         int r;
1037
1038         r = amdgpu_gfx_scratch_get(adev, &scratch);
1039         if (r)
1040                 return r;
1041
1042         WREG32(scratch, 0xCAFEDEAD);
1043         r = amdgpu_ring_alloc(ring, 3);
1044         if (r)
1045                 goto error_free_scratch;
1046
1047         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1048         amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
1049         amdgpu_ring_write(ring, 0xDEADBEEF);
1050         amdgpu_ring_commit(ring);
1051
1052         for (i = 0; i < adev->usec_timeout; i++) {
1053                 tmp = RREG32(scratch);
1054                 if (tmp == 0xDEADBEEF)
1055                         break;
1056                 udelay(1);
1057         }
1058
1059         if (i >= adev->usec_timeout)
1060                 r = -ETIMEDOUT;
1061
1062 error_free_scratch:
1063         amdgpu_gfx_scratch_free(adev, scratch);
1064         return r;
1065 }
1066
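/*
 * Indirect buffer test: submit a small IB that writes 0xDEADBEEF to a
 * write-back slot and verify the value landed once the fence signals.
 */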
1067 static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1068 {
1069         struct amdgpu_device *adev = ring->adev;
1070         struct amdgpu_ib ib;
1071         struct dma_fence *f = NULL;
1072
1073         unsigned index;
1074         uint64_t gpu_addr;
1075         uint32_t tmp;
1076         long r;
1077
1078         r = amdgpu_device_wb_get(adev, &index);
1079         if (r)
1080                 return r;
1081
1082         gpu_addr = adev->wb.gpu_addr + (index * 4);
1083         adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
1084         memset(&ib, 0, sizeof(ib));
1085         r = amdgpu_ib_get(adev, NULL, 16, &ib);
1086         if (r)
1087                 goto err1;
1088
1089         ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
1090         ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
1091         ib.ptr[2] = lower_32_bits(gpu_addr);
1092         ib.ptr[3] = upper_32_bits(gpu_addr);
1093         ib.ptr[4] = 0xDEADBEEF;
1094         ib.length_dw = 5;
1095
1096         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1097         if (r)
1098                 goto err2;
1099
1100         r = dma_fence_wait_timeout(f, false, timeout);
1101         if (r == 0) {
1102                 r = -ETIMEDOUT;
1103                 goto err2;
1104         } else if (r < 0) {
1105                 goto err2;
1106         }
1107
1108         tmp = adev->wb.wb[index];
1109         if (tmp == 0xDEADBEEF)
1110                 r = 0;
1111         else
1112                 r = -EINVAL;
1113
1114 err2:
1115         amdgpu_ib_free(adev, &ib, NULL);
1116         dma_fence_put(f);
1117 err1:
1118         amdgpu_device_wb_free(adev, index);
1119         return r;
1120 }

static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
1124 {
1125         release_firmware(adev->gfx.pfp_fw);
1126         adev->gfx.pfp_fw = NULL;
1127         release_firmware(adev->gfx.me_fw);
1128         adev->gfx.me_fw = NULL;
1129         release_firmware(adev->gfx.ce_fw);
1130         adev->gfx.ce_fw = NULL;
1131         release_firmware(adev->gfx.rlc_fw);
1132         adev->gfx.rlc_fw = NULL;
1133         release_firmware(adev->gfx.mec_fw);
1134         adev->gfx.mec_fw = NULL;
1135         release_firmware(adev->gfx.mec2_fw);
1136         adev->gfx.mec2_fw = NULL;
1137
1138         kfree(adev->gfx.rlc.register_list_format);
1139 }
1140
1141 static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
1142 {
1143         const struct rlc_firmware_header_v2_1 *rlc_hdr;
1144
1145         rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
1146         adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
1147         adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
1148         adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
1149         adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
1150         adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
1151         adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
1152         adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
1153         adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
1154         adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
1155         adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
1156         adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
1157         adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
1158         adev->gfx.rlc.reg_list_format_direct_reg_list_length =
1159                         le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
1160 }
1161
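/*
 * Record in me/mec_fw_write_wait whether the loaded CP firmware meets the
 * per-ASIC minimum versions (needed, presumably, for combined register
 * write-then-wait packets), and warn once when it is below the baseline.
 */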
1162 static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
1163 {
1164         adev->gfx.me_fw_write_wait = false;
1165         adev->gfx.mec_fw_write_wait = false;
1166
1167         if ((adev->asic_type != CHIP_ARCTURUS) &&
1168             ((adev->gfx.mec_fw_version < 0x000001a5) ||
1169             (adev->gfx.mec_feature_version < 46) ||
1170             (adev->gfx.pfp_fw_version < 0x000000b7) ||
1171             (adev->gfx.pfp_feature_version < 46)))
1172                 DRM_WARN_ONCE("CP firmware version too old, please update!");
1173
1174         switch (adev->asic_type) {
1175         case CHIP_VEGA10:
1176                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1177                     (adev->gfx.me_feature_version >= 42) &&
1178                     (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1179                     (adev->gfx.pfp_feature_version >= 42))
1180                         adev->gfx.me_fw_write_wait = true;
1181
1182                 if ((adev->gfx.mec_fw_version >=  0x00000193) &&
1183                     (adev->gfx.mec_feature_version >= 42))
1184                         adev->gfx.mec_fw_write_wait = true;
1185                 break;
1186         case CHIP_VEGA12:
1187                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1188                     (adev->gfx.me_feature_version >= 44) &&
1189                     (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1190                     (adev->gfx.pfp_feature_version >= 44))
1191                         adev->gfx.me_fw_write_wait = true;
1192
1193                 if ((adev->gfx.mec_fw_version >=  0x00000196) &&
1194                     (adev->gfx.mec_feature_version >= 44))
1195                         adev->gfx.mec_fw_write_wait = true;
1196                 break;
1197         case CHIP_VEGA20:
1198                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1199                     (adev->gfx.me_feature_version >= 44) &&
1200                     (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1201                     (adev->gfx.pfp_feature_version >= 44))
1202                         adev->gfx.me_fw_write_wait = true;
1203
1204                 if ((adev->gfx.mec_fw_version >=  0x00000197) &&
1205                     (adev->gfx.mec_feature_version >= 44))
1206                         adev->gfx.mec_fw_write_wait = true;
1207                 break;
1208         case CHIP_RAVEN:
1209                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1210                     (adev->gfx.me_feature_version >= 42) &&
1211                     (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1212                     (adev->gfx.pfp_feature_version >= 42))
1213                         adev->gfx.me_fw_write_wait = true;
1214
1215                 if ((adev->gfx.mec_fw_version >=  0x00000192) &&
1216                     (adev->gfx.mec_feature_version >= 42))
1217                         adev->gfx.mec_fw_write_wait = true;
1218                 break;
1219         default:
1220                 break;
1221         }
1222 }
1223
1224 struct amdgpu_gfxoff_quirk {
1225         u16 chip_vendor;
1226         u16 chip_device;
1227         u16 subsys_vendor;
1228         u16 subsys_device;
1229         u8 revision;
1230 };
1231
1232 static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
1233         /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
1234         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1235         { 0, 0, 0, 0, 0 },
1236 };
1237
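/*
 * Match the device's PCI identifiers against the GFXOFF quirk list above;
 * boards on the list get GFXOFF disabled.
 */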
1238 static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
1239 {
1240         const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
1241
1242         while (p && p->chip_device != 0) {
1243                 if (pdev->vendor == p->chip_vendor &&
1244                     pdev->device == p->chip_device &&
1245                     pdev->subsystem_vendor == p->subsys_vendor &&
1246                     pdev->subsystem_device == p->subsys_device &&
1247                     pdev->revision == p->revision) {
1248                         return true;
1249                 }
1250                 ++p;
1251         }
1252         return false;
1253 }
1254
1255 static bool is_raven_kicker(struct amdgpu_device *adev)
1256 {
        /* the "kicker" SMU firmware line reports versions >= 0x41e2b */
        return adev->pm.fw_version >= 0x41e2b;
1261 }
1262
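/*
 * Decide per ASIC whether GFXOFF stays enabled: quirked boards and Raven
 * parts with old RLC firmware lose it, while Raven/Renoir keeping GFXOFF
 * also gain the GFX powergating flags.
 */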
1263 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
1264 {
1265         if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
1266                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1267
1268         switch (adev->asic_type) {
1269         case CHIP_VEGA10:
1270         case CHIP_VEGA12:
1271         case CHIP_VEGA20:
1272                 break;
1273         case CHIP_RAVEN:
1274                 if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
1275                     ((!is_raven_kicker(adev) &&
1276                       adev->gfx.rlc_fw_version < 531) ||
1277                      (adev->gfx.rlc_feature_version < 1) ||
1278                      !adev->gfx.rlc.is_rlc_v2_1))
1279                         adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1280
1281                 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1282                         adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1283                                 AMD_PG_SUPPORT_CP |
1284                                 AMD_PG_SUPPORT_RLC_SMU_HS;
1285                 break;
1286         case CHIP_RENOIR:
1287                 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1288                         adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1289                                 AMD_PG_SUPPORT_CP |
1290                                 AMD_PG_SUPPORT_RLC_SMU_HS;
1291                 break;
1292         default:
1293                 break;
1294         }
1295 }
1296
1297 static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
1298                                           const char *chip_name)
1299 {
1300         char fw_name[30];
1301         int err;
1302         struct amdgpu_firmware_info *info = NULL;
1303         const struct common_firmware_header *header = NULL;
1304         const struct gfx_firmware_header_v1_0 *cp_hdr;
1305
1306         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
1307         err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
1308         if (err)
1309                 goto out;
1310         err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
1311         if (err)
1312                 goto out;
1313         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
1314         adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1315         adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1316
1317         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
1318         err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
1319         if (err)
1320                 goto out;
1321         err = amdgpu_ucode_validate(adev->gfx.me_fw);
1322         if (err)
1323                 goto out;
1324         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
1325         adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1326         adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1327
1328         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
1329         err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
1330         if (err)
1331                 goto out;
1332         err = amdgpu_ucode_validate(adev->gfx.ce_fw);
1333         if (err)
1334                 goto out;
1335         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
1336         adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1337         adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1338
1339         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1340                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
1341                 info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
1342                 info->fw = adev->gfx.pfp_fw;
1343                 header = (const struct common_firmware_header *)info->fw->data;
1344                 adev->firmware.fw_size +=
1345                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1346
1347                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
1348                 info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
1349                 info->fw = adev->gfx.me_fw;
1350                 header = (const struct common_firmware_header *)info->fw->data;
1351                 adev->firmware.fw_size +=
1352                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1353
1354                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
1355                 info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
1356                 info->fw = adev->gfx.ce_fw;
1357                 header = (const struct common_firmware_header *)info->fw->data;
1358                 adev->firmware.fw_size +=
1359                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1360         }
1361
1362 out:
1363         if (err) {
1364                 dev_err(adev->dev,
1365                         "gfx9: Failed to load firmware \"%s\"\n",
1366                         fw_name);
1367                 release_firmware(adev->gfx.pfp_fw);
1368                 adev->gfx.pfp_fw = NULL;
1369                 release_firmware(adev->gfx.me_fw);
1370                 adev->gfx.me_fw = NULL;
1371                 release_firmware(adev->gfx.ce_fw);
1372                 adev->gfx.ce_fw = NULL;
1373         }
1374         return err;
1375 }
1376
1377 static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
1378                                           const char *chip_name)
1379 {
1380         char fw_name[30];
1381         int err;
1382         struct amdgpu_firmware_info *info = NULL;
1383         const struct common_firmware_header *header = NULL;
1384         const struct rlc_firmware_header_v2_0 *rlc_hdr;
1385         unsigned int *tmp = NULL;
1386         unsigned int i = 0;
1387         uint16_t version_major;
1388         uint16_t version_minor;
1389         uint32_t smu_version;
1390
        /*
         * For Picasso on an AM4 socket board, use picasso_rlc_am4.bin
         * instead of picasso_rlc.bin.
         * Detection: PCO AM4 has revision 0xC8..0xCF or 0xD8..0xDF;
         * otherwise the part is PCO FP5.
         */
1399         if (!strcmp(chip_name, "picasso") &&
1400                 (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
1401                 ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
1402                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
1403         else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
1404                 (smu_version >= 0x41e2b))
                /*
                 * The SMC is loaded by the SBIOS on APUs, so the SMU
                 * version can be queried directly.
                 */
1408                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
1409         else
1410                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
1411         err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
1412         if (err)
1413                 goto out;
        err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
        if (err)
                goto out;
1415         rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1416
1417         version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1418         version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1419         if (version_major == 2 && version_minor == 1)
1420                 adev->gfx.rlc.is_rlc_v2_1 = true;
1421
1422         adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
1423         adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
1424         adev->gfx.rlc.save_and_restore_offset =
1425                         le32_to_cpu(rlc_hdr->save_and_restore_offset);
1426         adev->gfx.rlc.clear_state_descriptor_offset =
1427                         le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
1428         adev->gfx.rlc.avail_scratch_ram_locations =
1429                         le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
1430         adev->gfx.rlc.reg_restore_list_size =
1431                         le32_to_cpu(rlc_hdr->reg_restore_list_size);
1432         adev->gfx.rlc.reg_list_format_start =
1433                         le32_to_cpu(rlc_hdr->reg_list_format_start);
1434         adev->gfx.rlc.reg_list_format_separate_start =
1435                         le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
1436         adev->gfx.rlc.starting_offsets_start =
1437                         le32_to_cpu(rlc_hdr->starting_offsets_start);
1438         adev->gfx.rlc.reg_list_format_size_bytes =
1439                         le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
1440         adev->gfx.rlc.reg_list_size_bytes =
1441                         le32_to_cpu(rlc_hdr->reg_list_size_bytes);
1442         adev->gfx.rlc.register_list_format =
1443                         kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
1444                                 adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
1445         if (!adev->gfx.rlc.register_list_format) {
1446                 err = -ENOMEM;
1447                 goto out;
1448         }
1449
1450         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1451                         le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
1452         for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
1453                 adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
1454
1455         adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
1456
1457         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1458                         le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
1459         for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
1460                 adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
1461
1462         if (adev->gfx.rlc.is_rlc_v2_1)
1463                 gfx_v9_0_init_rlc_ext_microcode(adev);
1464
1465         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1466                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
1467                 info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
1468                 info->fw = adev->gfx.rlc_fw;
1469                 header = (const struct common_firmware_header *)info->fw->data;
1470                 adev->firmware.fw_size +=
1471                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1472
1473                 if (adev->gfx.rlc.is_rlc_v2_1 &&
1474                     adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
1475                     adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
1476                     adev->gfx.rlc.save_restore_list_srm_size_bytes) {
1477                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
1478                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
1479                         info->fw = adev->gfx.rlc_fw;
1480                         adev->firmware.fw_size +=
1481                                 ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
1482
1483                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
1484                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
1485                         info->fw = adev->gfx.rlc_fw;
1486                         adev->firmware.fw_size +=
1487                                 ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
1488
1489                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
1490                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
1491                         info->fw = adev->gfx.rlc_fw;
1492                         adev->firmware.fw_size +=
1493                                 ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
1494                 }
1495         }
1496
1497 out:
1498         if (err) {
1499                 dev_err(adev->dev,
1500                         "gfx9: Failed to load firmware \"%s\"\n",
1501                         fw_name);
1502                 release_firmware(adev->gfx.rlc_fw);
1503                 adev->gfx.rlc_fw = NULL;
1504         }
1505         return err;
1506 }
1507
1508 static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
1509                                           const char *chip_name)
1510 {
1511         char fw_name[30];
1512         int err;
1513         struct amdgpu_firmware_info *info = NULL;
1514         const struct common_firmware_header *header = NULL;
1515         const struct gfx_firmware_header_v1_0 *cp_hdr;
1516
1517         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
1518         err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
1519         if (err)
1520                 goto out;
1521         err = amdgpu_ucode_validate(adev->gfx.mec_fw);
1522         if (err)
1523                 goto out;
1524         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1525         adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1526         adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
1530         err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
1531         if (!err) {
1532                 err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
1533                 if (err)
1534                         goto out;
                cp_hdr = (const struct gfx_firmware_header_v1_0 *)
                        adev->gfx.mec2_fw->data;
                adev->gfx.mec2_fw_version =
                        le32_to_cpu(cp_hdr->header.ucode_version);
                adev->gfx.mec2_feature_version =
                        le32_to_cpu(cp_hdr->ucode_feature_version);
1541         } else {
1542                 err = 0;
1543                 adev->gfx.mec2_fw = NULL;
1544         }
1545
1546         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1547                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
1548                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
1549                 info->fw = adev->gfx.mec_fw;
1550                 header = (const struct common_firmware_header *)info->fw->data;
1551                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1552                 adev->firmware.fw_size +=
1553                         ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1554
1555                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
1556                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
1557                 info->fw = adev->gfx.mec_fw;
1558                 adev->firmware.fw_size +=
1559                         ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1560
1561                 if (adev->gfx.mec2_fw) {
1562                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
1563                         info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
1564                         info->fw = adev->gfx.mec2_fw;
1565                         header = (const struct common_firmware_header *)info->fw->data;
1566                         cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1567                         adev->firmware.fw_size +=
1568                                 ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1569
                        /*
                         * TODO: Determine if MEC2 JT FW loading can be
                         * removed for all GFX v9 ASICs and newer.
                         */
1572                         if (adev->asic_type != CHIP_ARCTURUS &&
1573                             adev->asic_type != CHIP_RENOIR) {
1574                                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
1575                                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
1576                                 info->fw = adev->gfx.mec2_fw;
1577                                 adev->firmware.fw_size +=
1578                                         ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
1579                                         PAGE_SIZE);
1580                         }
1581                 }
1582         }
1583
1584 out:
1585         gfx_v9_0_check_if_need_gfxoff(adev);
1586         gfx_v9_0_check_fw_write_wait(adev);
1587         if (err) {
1588                 dev_err(adev->dev,
1589                         "gfx9: Failed to load firmware \"%s\"\n",
1590                         fw_name);
1591                 release_firmware(adev->gfx.mec_fw);
1592                 adev->gfx.mec_fw = NULL;
1593                 release_firmware(adev->gfx.mec2_fw);
1594                 adev->gfx.mec2_fw = NULL;
1595         }
1596         return err;
1597 }
1598
1599 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
1600 {
1601         const char *chip_name;
1602         int r;
1603
1604         DRM_DEBUG("\n");
1605
1606         switch (adev->asic_type) {
1607         case CHIP_VEGA10:
1608                 chip_name = "vega10";
1609                 break;
1610         case CHIP_VEGA12:
1611                 chip_name = "vega12";
1612                 break;
1613         case CHIP_VEGA20:
1614                 chip_name = "vega20";
1615                 break;
1616         case CHIP_RAVEN:
1617                 if (adev->rev_id >= 8)
1618                         chip_name = "raven2";
1619                 else if (adev->pdev->device == 0x15d8)
1620                         chip_name = "picasso";
1621                 else
1622                         chip_name = "raven";
1623                 break;
1624         case CHIP_ARCTURUS:
1625                 chip_name = "arcturus";
1626                 break;
1627         case CHIP_RENOIR:
1628                 chip_name = "renoir";
1629                 break;
1630         default:
1631                 BUG();
1632         }
1633
1634         /* No CPG in Arcturus */
1635         if (adev->asic_type != CHIP_ARCTURUS) {
1636                 r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
1637                 if (r)
1638                         return r;
1639         }
1640
1641         r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
1642         if (r)
1643                 return r;
1644
1645         r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
1646         if (r)
1647                 return r;
1648
        return 0;
1650 }
1651
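/*
 * Compute the clear-state buffer size in dwords: the preamble and context
 * control packets, one SET_CONTEXT_REG packet per register extent, plus
 * the trailing end-of-clear-state and CLEAR_STATE packets.
 */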
1652 static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
1653 {
1654         u32 count = 0;
1655         const struct cs_section_def *sect = NULL;
1656         const struct cs_extent_def *ext = NULL;
1657
1658         /* begin clear state */
1659         count += 2;
1660         /* context control state */
1661         count += 3;
1662
1663         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
1664                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1665                         if (sect->id == SECT_CONTEXT)
1666                                 count += 2 + ext->reg_count;
1667                         else
1668                                 return 0;
1669                 }
1670         }
1671
1672         /* end clear state */
1673         count += 2;
1674         /* clear state */
1675         count += 2;
1676
1677         return count;
1678 }
1679
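/*
 * Fill the clear-state buffer with the packet stream that was sized by
 * gfx_v9_0_get_csb_size() above, emitting the SECT_CONTEXT register
 * extents from the rlc clear-state data.
 */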
1680 static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
1681                                     volatile u32 *buffer)
1682 {
1683         u32 count = 0, i;
1684         const struct cs_section_def *sect = NULL;
1685         const struct cs_extent_def *ext = NULL;
1686
1687         if (adev->gfx.rlc.cs_data == NULL)
1688                 return;
1689         if (buffer == NULL)
1690                 return;
1691
1692         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1693         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1694
1695         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
1696         buffer[count++] = cpu_to_le32(0x80000000);
1697         buffer[count++] = cpu_to_le32(0x80000000);
1698
1699         for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
1700                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1701                         if (sect->id == SECT_CONTEXT) {
1702                                 buffer[count++] =
1703                                         cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
1704                                 buffer[count++] = cpu_to_le32(ext->reg_index -
1705                                                 PACKET3_SET_CONTEXT_REG_START);
1706                                 for (i = 0; i < ext->reg_count; i++)
1707                                         buffer[count++] = cpu_to_le32(ext->extent[i]);
1708                         } else {
1709                                 return;
1710                         }
1711                 }
1712         }
1713
1714         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1715         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
1716
1717         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
1718         buffer[count++] = cpu_to_le32(0);
1719 }
1720
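/*
 * Program the always-on CU masks per shader engine/array: the first
 * always_on_cu_num active CUs (4 on APUs, 8 on Vega12, 12 otherwise) stay
 * active, and the first two of those also form the powergating AON mask.
 */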
1721 static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
1722 {
1723         struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
1724         uint32_t pg_always_on_cu_num = 2;
1725         uint32_t always_on_cu_num;
1726         uint32_t i, j, k;
1727         uint32_t mask, cu_bitmap, counter;
1728
1729         if (adev->flags & AMD_IS_APU)
1730                 always_on_cu_num = 4;
1731         else if (adev->asic_type == CHIP_VEGA12)
1732                 always_on_cu_num = 8;
1733         else
1734                 always_on_cu_num = 12;
1735
1736         mutex_lock(&adev->grbm_idx_mutex);
1737         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1738                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1739                         mask = 1;
1740                         cu_bitmap = 0;
1741                         counter = 0;
1742                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1743
                        for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
1745                                 if (cu_info->bitmap[i][j] & mask) {
1746                                         if (counter == pg_always_on_cu_num)
1747                                                 WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
1748                                         if (counter < always_on_cu_num)
1749                                                 cu_bitmap |= mask;
1750                                         else
1751                                                 break;
1752                                         counter++;
1753                                 }
1754                                 mask <<= 1;
1755                         }
1756
1757                         WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
1758                         cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
1759                 }
1760         }
1761         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1762         mutex_unlock(&adev->grbm_idx_mutex);
1763 }
1764
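/*
 * Program the RLC load-balancing (LBPW) thresholds, counters and CU masks
 * for Raven; gfx_v9_4_init_lbpw() below does the same for Vega20 with
 * different threshold values.
 */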
1765 static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
1766 {
1767         uint32_t data;
1768
1769         /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1770         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1771         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
1772         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1773         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));
1774
1775         /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1776         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1777
1778         /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1779         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);
1780
1781         mutex_lock(&adev->grbm_idx_mutex);
        /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1783         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1784         WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1785
1786         /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1787         data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1788         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1789         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1790         WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1791
1792         /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1793         data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1794         data &= 0x0000FFFF;
1795         data |= 0x00C00000;
1796         WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1797
1798         /*
1799          * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
1800          * programmed in gfx_v9_0_init_always_on_cu_mask()
1801          */
1802
        /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
         * but is used as part of the RLC_LB_CNTL configuration */
1805         data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1806         data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1807         data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1808         WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1809         mutex_unlock(&adev->grbm_idx_mutex);
1810
1811         gfx_v9_0_init_always_on_cu_mask(adev);
1812 }
1813
1814 static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1815 {
1816         uint32_t data;
1817
1818         /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1819         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1820         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
1821         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1822         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
1823
1824         /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1825         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1826
        /* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
1828         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1829
1830         mutex_lock(&adev->grbm_idx_mutex);
        /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1832         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1833         WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1834
1835         /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1836         data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1837         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1838         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1839         WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1840
1841         /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1842         data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1843         data &= 0x0000FFFF;
1844         data |= 0x00C00000;
1845         WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1846
1847         /*
1848          * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
1849          * programmed in gfx_v9_0_init_always_on_cu_mask()
1850          */
1851
        /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
         * but is used as part of the RLC_LB_CNTL configuration */
1854         data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1855         data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1856         data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1857         WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1858         mutex_unlock(&adev->grbm_idx_mutex);
1859
1860         gfx_v9_0_init_always_on_cu_mask(adev);
1861 }
1862
1863 static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
1864 {
1865         WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
1866 }
1867
1868 static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
1869 {
1870         return 5;
1871 }
1872
1873 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1874 {
1875         const struct cs_section_def *cs_data;
1876         int r;
1877
1878         adev->gfx.rlc.cs_data = gfx9_cs_data;
1879
1880         cs_data = adev->gfx.rlc.cs_data;
1881
1882         if (cs_data) {
1883                 /* init clear state block */
1884                 r = amdgpu_gfx_rlc_init_csb(adev);
1885                 if (r)
1886                         return r;
1887         }
1888
1889         if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
1890                 /* TODO: double check the cp_table_size for RV */
1891                 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1892                 r = amdgpu_gfx_rlc_init_cpt(adev);
1893                 if (r)
1894                         return r;
1895         }
1896
1897         switch (adev->asic_type) {
1898         case CHIP_RAVEN:
1899                 gfx_v9_0_init_lbpw(adev);
1900                 break;
1901         case CHIP_VEGA20:
1902                 gfx_v9_4_init_lbpw(adev);
1903                 break;
1904         default:
1905                 break;
1906         }
1907
1908         /* init spm vmid with 0xf */
1909         if (adev->gfx.rlc.funcs->update_spm_vmid)
1910                 adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
1911
1912         return 0;
1913 }
1914
1915 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
1916 {
1917         amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1918         amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1919 }
1920
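/*
 * Allocate the MEC resources: a VRAM EOP buffer (GFX9_MEC_HPD_SIZE bytes
 * per acquired compute ring) and a GTT bo holding a copy of the MEC
 * firmware image.
 */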
1921 static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
1922 {
1923         int r;
1924         u32 *hpd;
1925         const __le32 *fw_data;
1926         unsigned fw_size;
1927         u32 *fw;
        size_t mec_hpd_size;
        const struct gfx_firmware_header_v1_0 *mec_hdr;

        bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1933
1934         /* take ownership of the relevant compute queues */
1935         amdgpu_gfx_compute_queue_acquire(adev);
1936         mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
1937
1938         r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1939                                       AMDGPU_GEM_DOMAIN_VRAM,
1940                                       &adev->gfx.mec.hpd_eop_obj,
1941                                       &adev->gfx.mec.hpd_eop_gpu_addr,
1942                                       (void **)&hpd);
1943         if (r) {
                dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
1945                 gfx_v9_0_mec_fini(adev);
1946                 return r;
1947         }
1948
1949         memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);
1950
1951         amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1952         amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1953
1954         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1955
1956         fw_data = (const __le32 *)
1957                 (adev->gfx.mec_fw->data +
1958                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1959         fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
1960
1961         r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
1962                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1963                                       &adev->gfx.mec.mec_fw_obj,
1964                                       &adev->gfx.mec.mec_fw_gpu_addr,
1965                                       (void **)&fw);
1966         if (r) {
1967                 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
1968                 gfx_v9_0_mec_fini(adev);
1969                 return r;
1970         }
1971
1972         memcpy(fw, fw_data, fw_size);
1973
1974         amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1975         amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1976
1977         return 0;
1978 }
1979
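/*
 * Read a wave register through the SQ indirect index/data pair;
 * wave_read_regs() below additionally uses auto-increment to read a
 * consecutive range (e.g. SGPRs or VGPRs) in one pass.
 */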
1980 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
1981 {
1982         WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
1983                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1984                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1985                 (address << SQ_IND_INDEX__INDEX__SHIFT) |
1986                 (SQ_IND_INDEX__FORCE_READ_MASK));
1987         return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1988 }
1989
1990 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
1991                            uint32_t wave, uint32_t thread,
1992                            uint32_t regno, uint32_t num, uint32_t *out)
1993 {
1994         WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
1995                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1996                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1997                 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
1998                 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
1999                 (SQ_IND_INDEX__FORCE_READ_MASK) |
2000                 (SQ_IND_INDEX__AUTO_INCR_MASK));
2001         while (num--)
2002                 *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
2003 }
2004
2005 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
2006 {
2007         /* type 1 wave data */
2008         dst[(*no_fields)++] = 1;
2009         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
2010         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
2011         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
2012         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
2013         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
2014         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
2015         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
2016         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
2017         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
2018         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
2019         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
2020         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
2021         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
2022         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
2023 }
2024
2025 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
2026                                      uint32_t wave, uint32_t start,
2027                                      uint32_t size, uint32_t *dst)
2028 {
2029         wave_read_regs(
2030                 adev, simd, wave, 0,
2031                 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
2032 }
2033
2034 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
2035                                      uint32_t wave, uint32_t thread,
2036                                      uint32_t start, uint32_t size,
2037                                      uint32_t *dst)
2038 {
2039         wave_read_regs(
2040                 adev, simd, wave, thread,
2041                 start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
2042 }
2043
2044 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
2045                                   u32 me, u32 pipe, u32 q, u32 vm)
2046 {
2047         soc15_grbm_select(adev, me, pipe, q, vm);
2048 }
2049
2050 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
2051         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2052         .select_se_sh = &gfx_v9_0_select_se_sh,
2053         .read_wave_data = &gfx_v9_0_read_wave_data,
2054         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2055         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2056         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2057         .ras_error_inject = &gfx_v9_0_ras_error_inject,
2058         .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
2059         .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
2060 };
2061
2062 static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
2063         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2064         .select_se_sh = &gfx_v9_0_select_se_sh,
2065         .read_wave_data = &gfx_v9_0_read_wave_data,
2066         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2067         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2068         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2069         .ras_error_inject = &gfx_v9_4_ras_error_inject,
2070         .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
2071         .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
2072 };
2073
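/*
 * Early GFX configuration: pick the gfx funcs table, fill in the per-ASIC
 * fifo sizes, obtain GB_ADDR_CONFIG (golden value or register readback)
 * and decode its bitfields into gfx.config.
 */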
2074 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
2075 {
2076         u32 gb_addr_config;
2077         int err;
2078
2079         adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
2080
2081         switch (adev->asic_type) {
2082         case CHIP_VEGA10:
2083                 adev->gfx.config.max_hw_contexts = 8;
2084                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2085                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2086                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2087                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2088                 gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
2089                 break;
2090         case CHIP_VEGA12:
2091                 adev->gfx.config.max_hw_contexts = 8;
2092                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2093                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2094                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2095                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2096                 gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
2097                 DRM_INFO("fix gfx.config for vega12\n");
2098                 break;
2099         case CHIP_VEGA20:
2100                 adev->gfx.config.max_hw_contexts = 8;
2101                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2102                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2103                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2104                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2105                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2106                 gb_addr_config &= ~0xf3e777ff;
2107                 gb_addr_config |= 0x22014042;
2108                 /* check vbios table if gpu info is not available */
2109                 err = amdgpu_atomfirmware_get_gfx_info(adev);
2110                 if (err)
2111                         return err;
2112                 break;
2113         case CHIP_RAVEN:
2114                 adev->gfx.config.max_hw_contexts = 8;
2115                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2116                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2117                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2118                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2119                 if (adev->rev_id >= 8)
2120                         gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
2121                 else
2122                         gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
2123                 break;
2124         case CHIP_ARCTURUS:
2125                 adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
2126                 adev->gfx.config.max_hw_contexts = 8;
2127                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2128                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2129                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2130                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2131                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2132                 gb_addr_config &= ~0xf3e777ff;
2133                 gb_addr_config |= 0x22014042;
2134                 break;
2135         case CHIP_RENOIR:
2136                 adev->gfx.config.max_hw_contexts = 8;
2137                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2138                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2139                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
2140                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2141                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2142                 gb_addr_config &= ~0xf3e777ff;
2143                 gb_addr_config |= 0x22010042;
2144                 break;
2145         default:
2146                 BUG();
2147                 break;
2148         }
2149
2150         adev->gfx.config.gb_addr_config = gb_addr_config;
2151
2152         adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2153                         REG_GET_FIELD(
2154                                         adev->gfx.config.gb_addr_config,
2155                                         GB_ADDR_CONFIG,
2156                                         NUM_PIPES);
2157
2158         adev->gfx.config.max_tile_pipes =
2159                 adev->gfx.config.gb_addr_config_fields.num_pipes;
2160
2161         adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
2162                         REG_GET_FIELD(
2163                                         adev->gfx.config.gb_addr_config,
2164                                         GB_ADDR_CONFIG,
2165                                         NUM_BANKS);
2166         adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2167                         REG_GET_FIELD(
2168                                         adev->gfx.config.gb_addr_config,
2169                                         GB_ADDR_CONFIG,
2170                                         MAX_COMPRESSED_FRAGS);
2171         adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2172                         REG_GET_FIELD(
2173                                         adev->gfx.config.gb_addr_config,
2174                                         GB_ADDR_CONFIG,
2175                                         NUM_RB_PER_SE);
2176         adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2177                         REG_GET_FIELD(
2178                                         adev->gfx.config.gb_addr_config,
2179                                         GB_ADDR_CONFIG,
2180                                         NUM_SHADER_ENGINES);
2181         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2182                         REG_GET_FIELD(
2183                                         adev->gfx.config.gb_addr_config,
2184                                         GB_ADDR_CONFIG,
2185                                         PIPE_INTERLEAVE_SIZE));
2186
2187         return 0;
2188 }
2189
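/*
 * Set up one compute ring: map it onto its MEC/pipe/queue, assign a
 * doorbell and its slice of the EOP buffer, and hook it to the matching
 * EOP interrupt source.
 */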
2190 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
2191                                       int mec, int pipe, int queue)
2192 {
2193         int r;
2194         unsigned irq_type;
        struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2198
2199         /* mec0 is me1 */
2200         ring->me = mec + 1;
2201         ring->pipe = pipe;
2202         ring->queue = queue;
2203
2204         ring->ring_obj = NULL;
2205         ring->use_doorbell = true;
2206         ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
2207         ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
2208                                 + (ring_id * GFX9_MEC_HPD_SIZE);
2209         sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
2210
2211         irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
2212                 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
2213                 + ring->pipe;
2214
2215         /* type-2 packets are deprecated on MEC, use type-3 instead */
2216         r = amdgpu_ring_init(adev, ring, 1024,
2217                              &adev->gfx.eop_irq, irq_type);
2218         if (r)
2219                 return r;
2220
2222         return 0;
2223 }
2224
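/*
 * sw_init: hook up the CP interrupt sources, load microcode, allocate
 * the RLC, MEC and KIQ objects, then create the gfx ring(s) and every
 * enabled compute queue, spreading the latter across pipes first.
 */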
2225 static int gfx_v9_0_sw_init(void *handle)
2226 {
2227         int i, j, k, r, ring_id;
2228         struct amdgpu_ring *ring;
2229         struct amdgpu_kiq *kiq;
2230         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2231
2232         switch (adev->asic_type) {
2233         case CHIP_VEGA10:
2234         case CHIP_VEGA12:
2235         case CHIP_VEGA20:
2236         case CHIP_RAVEN:
2237         case CHIP_ARCTURUS:
2238         case CHIP_RENOIR:
2239                 adev->gfx.mec.num_mec = 2;
2240                 break;
2241         default:
2242                 adev->gfx.mec.num_mec = 1;
2243                 break;
2244         }
2245
2246         adev->gfx.mec.num_pipe_per_mec = 4;
2247         adev->gfx.mec.num_queue_per_pipe = 8;
2248
2249         /* EOP Event */
2250         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
2251         if (r)
2252                 return r;
2253
2254         /* Privileged reg */
2255         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
2256                               &adev->gfx.priv_reg_irq);
2257         if (r)
2258                 return r;
2259
2260         /* Privileged inst */
2261         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
2262                               &adev->gfx.priv_inst_irq);
2263         if (r)
2264                 return r;
2265
2266         /* ECC error */
2267         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
2268                               &adev->gfx.cp_ecc_error_irq);
2269         if (r)
2270                 return r;
2271
2272         /* FUE error */
2273         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
2274                               &adev->gfx.cp_ecc_error_irq);
2275         if (r)
2276                 return r;
2277
2278         adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2279
2280         gfx_v9_0_scratch_init(adev);
2281
2282         r = gfx_v9_0_init_microcode(adev);
2283         if (r) {
2284                 DRM_ERROR("Failed to load gfx firmware!\n");
2285                 return r;
2286         }
2287
2288         r = adev->gfx.rlc.funcs->init(adev);
2289         if (r) {
2290                 DRM_ERROR("Failed to init rlc BOs!\n");
2291                 return r;
2292         }
2293
2294         r = gfx_v9_0_mec_init(adev);
2295         if (r) {
2296                 DRM_ERROR("Failed to init MEC BOs!\n");
2297                 return r;
2298         }
2299
2300         /* set up the gfx ring */
2301         for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2302                 ring = &adev->gfx.gfx_ring[i];
2303                 ring->ring_obj = NULL;
2304                 if (!i)
2305                         sprintf(ring->name, "gfx");
2306                 else
2307                         sprintf(ring->name, "gfx_%d", i);
2308                 ring->use_doorbell = true;
2309                 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2310                 r = amdgpu_ring_init(adev, ring, 1024,
2311                                      &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
2312                 if (r)
2313                         return r;
2314         }
2315
2316         /* set up the compute queues - allocate horizontally across pipes */
2317         ring_id = 0;
2318         for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2319                 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2320                         for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2321                                 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
2322                                         continue;
2323
2324                                 r = gfx_v9_0_compute_ring_init(adev,
2325                                                                ring_id,
2326                                                                i, k, j);
2327                                 if (r)
2328                                         return r;
2329
2330                                 ring_id++;
2331                         }
2332                 }
2333         }
2334
2335         r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
2336         if (r) {
2337                 DRM_ERROR("Failed to init KIQ BOs!\n");
2338                 return r;
2339         }
2340
2341         kiq = &adev->gfx.kiq;
2342         r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
2343         if (r)
2344                 return r;
2345
2346         /* create MQD for all compute queues as well as KIQ for SRIOV case */
2347         r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
2348         if (r)
2349                 return r;
2350
2351         adev->gfx.ce_ram_size = 0x8000;
2352
2353         r = gfx_v9_0_gpu_early_init(adev);
2354         if (r)
2355                 return r;
2356
2357         return 0;
2358 }
2359
2360
2361 static int gfx_v9_0_sw_fini(void *handle)
2362 {
2363         int i;
2364         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2365
2366         amdgpu_gfx_ras_fini(adev);
2367
2368         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2369                 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2370         for (i = 0; i < adev->gfx.num_compute_rings; i++)
2371                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2372
2373         amdgpu_gfx_mqd_sw_fini(adev);
2374         amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
2375         amdgpu_gfx_kiq_fini(adev);
2376
2377         gfx_v9_0_mec_fini(adev);
2378         amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
2379         if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
2380                 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2381                                 &adev->gfx.rlc.cp_table_gpu_addr,
2382                                 (void **)&adev->gfx.rlc.cp_table_ptr);
2383         }
2384         gfx_v9_0_free_microcode(adev);
2385
2386         return 0;
2387 }
2388
2389
2390 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
2391 {
2392         /* TODO */
2393 }
2394
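/*
 * Steer subsequent register accesses to one shader engine / array /
 * instance by programming GRBM_GFX_INDEX; 0xffffffff for any argument
 * selects broadcast at that level instead.
 */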
2395 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
2396 {
2397         u32 data;
2398
2399         if (instance == 0xffffffff)
2400                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2401         else
2402                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
2403
2404         if (se_num == 0xffffffff)
2405                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2406         else
2407                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2408
2409         if (sh_num == 0xffffffff)
2410                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2411         else
2412                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2413
2414         WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
2415 }
2416
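/*
 * Return a bitmap of render backends that are enabled for the SE/SH
 * currently selected in GRBM_GFX_INDEX, i.e. not disabled by fuses
 * (CC_RB_BACKEND_DISABLE) or by user config (GC_USER_RB_BACKEND_DISABLE).
 */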
2417 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2418 {
2419         u32 data, mask;
2420
2421         data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
2422         data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
2423
2424         data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2425         data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2426
2427         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
2428                                          adev->gfx.config.max_sh_per_se);
2429
2430         return (~data) & mask;
2431 }
2432
2433 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
2434 {
2435         int i, j;
2436         u32 data;
2437         u32 active_rbs = 0;
2438         u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2439                                         adev->gfx.config.max_sh_per_se;
2440
2441         mutex_lock(&adev->grbm_idx_mutex);
2442         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2443                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2444                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2445                         data = gfx_v9_0_get_rb_active_bitmap(adev);
2446                         active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2447                                                rb_bitmap_width_per_sh);
2448                 }
2449         }
2450         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2451         mutex_unlock(&adev->grbm_idx_mutex);
2452
2453         adev->gfx.config.backend_enable_mask = active_rbs;
2454         adev->gfx.config.num_rbs = hweight32(active_rbs);
2455 }
2456
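/*
 * SH_MEM_BASES packs the private (scratch) aperture base in its low
 * 16 bits and the shared aperture base in its high 16 bits; each field
 * holds bits 63:48 of the address (compare the ">> 48" in
 * gfx_v9_0_constants_init). 0x6000 therefore places both apertures in
 * the 0x6000'0000'00000000 range described below.
 */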
2457 #define DEFAULT_SH_MEM_BASES    (0x6000)
2458 #define FIRST_COMPUTE_VMID      (8)
2459 #define LAST_COMPUTE_VMID       (16)
2460 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
2461 {
2462         int i;
2463         uint32_t sh_mem_config;
2464         uint32_t sh_mem_bases;
2465
2466         /*
2467          * Configure apertures:
2468          * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
2469          * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
2470          * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
2471          */
2472         sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
2473
2474         sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
2475                         SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2476                         SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
2477
2478         mutex_lock(&adev->srbm_mutex);
2479         for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
2480                 soc15_grbm_select(adev, 0, 0, 0, i);
2481                 /* CP and shaders */
2482                 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
2483                 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
2484         }
2485         soc15_grbm_select(adev, 0, 0, 0, 0);
2486         mutex_unlock(&adev->srbm_mutex);
2487
2488         /* Initialize all compute VMIDs to have no GDS, GWS, or OA
2489            access. These should be enabled by FW for target VMIDs. */
2490         for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
2491                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
2492                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
2493                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
2494                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
2495         }
2496 }
2497
2498 static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
2499 {
2500         int vmid;
2501
2502         /*
2503          * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2504          * access. Compute VMIDs should be enabled by FW for target VMIDs;
2505          * the driver can enable them for graphics. VMID0 should maintain
2506          * access so that HWS firmware can save/restore entries.
2507          */
2508         for (vmid = 1; vmid < 16; vmid++) {
2509                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
2510                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
2511                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
2512                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
2513         }
2514 }
2515
2516 static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
2517 {
2518         uint32_t tmp;
2519
2520         switch (adev->asic_type) {
2521         case CHIP_ARCTURUS:
2522                 tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
2523                 tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
2524                                         DISABLE_BARRIER_WAITCNT, 1);
2525                 WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
2526                 break;
2527         default:
2528                 break;
2529         }
2530 }
2531
2532 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
2533 {
2534         u32 tmp;
2535         int i;
2536
2537         WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2538
2539         gfx_v9_0_tiling_mode_table_init(adev);
2540
2541         gfx_v9_0_setup_rb(adev);
2542         gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
2543         adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
2544
2545         /* XXX SH_MEM regs */
2546         /* where to put LDS, scratch, GPUVM in FSA64 space */
2547         mutex_lock(&adev->srbm_mutex);
2548         for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
2549                 soc15_grbm_select(adev, 0, 0, 0, i);
2550                 /* CP and shaders */
2551                 if (i == 0) {
2552                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2553                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2554                         tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2555                                             !!amdgpu_noretry);
2556                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2557                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
2558                 } else {
2559                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2560                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2561                         tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2562                                             !!amdgpu_noretry);
2563                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2564                         tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2565                                 (adev->gmc.private_aperture_start >> 48));
2566                         tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2567                                 (adev->gmc.shared_aperture_start >> 48));
2568                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
2569                 }
2570         }
2571         soc15_grbm_select(adev, 0, 0, 0, 0);
2572
2573         mutex_unlock(&adev->srbm_mutex);
2574
2575         gfx_v9_0_init_compute_vmid(adev);
2576         gfx_v9_0_init_gds_vmid(adev);
2577         gfx_v9_0_init_sq_config(adev);
2578 }
2579
2580 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2581 {
2582         u32 i, j, k;
2583         u32 mask;
2584
2585         mutex_lock(&adev->grbm_idx_mutex);
2586         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2587                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2588                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2589                         for (k = 0; k < adev->usec_timeout; k++) {
2590                                 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2591                                         break;
2592                                 udelay(1);
2593                         }
2594                         if (k == adev->usec_timeout) {
2595                                 gfx_v9_0_select_se_sh(adev, 0xffffffff,
2596                                                       0xffffffff, 0xffffffff);
2597                                 mutex_unlock(&adev->grbm_idx_mutex);
2598                                 DRM_INFO("Timeout waiting for RLC serdes %u,%u\n",
2599                                          i, j);
2600                                 return;
2601                         }
2602                 }
2603         }
2604         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2605         mutex_unlock(&adev->grbm_idx_mutex);
2606
2607         mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2608                 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2609                 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2610                 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2611         for (k = 0; k < adev->usec_timeout; k++) {
2612                 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2613                         break;
2614                 udelay(1);
2615         }
2616 }
2617
2618 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2619                                                bool enable)
2620 {
2621         u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2622
2623         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2624         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2625         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2626         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2627
2628         WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2629 }
2630
2631 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2632 {
2633         adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2634         /* csib */
2635         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2636                         adev->gfx.rlc.clear_state_gpu_addr >> 32);
2637         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2638                         adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2639         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2640                         adev->gfx.rlc.clear_state_size);
2641 }
2642
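/*
 * Walk the indirect part of the RLC register list. Each block is a run
 * of three-dword entries, with the indirect register offset in the
 * third dword (judging by the += 2 stride below), terminated by a
 * 0xFFFFFFFF sentinel. Record where each block starts and collect the
 * unique indirect register offsets into unique_indirect_regs.
 */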
2643 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2644                                 int indirect_offset,
2645                                 int list_size,
2646                                 int *unique_indirect_regs,
2647                                 int unique_indirect_reg_count,
2648                                 int *indirect_start_offsets,
2649                                 int *indirect_start_offsets_count,
2650                                 int max_start_offsets_count)
2651 {
2652         int idx;
2653
2654         for (; indirect_offset < list_size; indirect_offset++) {
2655                 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2656                 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2657                 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2658
2659                 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2660                         indirect_offset += 2;
2661
2662                         /* look for the matching index */
2663                         for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2664                                 if (unique_indirect_regs[idx] ==
2665                                         register_list_format[indirect_offset] ||
2666                                         !unique_indirect_regs[idx])
2667                                         break;
2668                         }
2669
2670                         BUG_ON(idx >= unique_indirect_reg_count);
2671
2672                         if (!unique_indirect_regs[idx])
2673                                 unique_indirect_regs[idx] = register_list_format[indirect_offset];
2674
2675                         indirect_offset++;
2676                 }
2677         }
2678 }
2679
2680 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2681 {
2682         int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2683         int unique_indirect_reg_count = 0;
2684
2685         int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2686         int indirect_start_offsets_count = 0;
2687
2688         int list_size = 0;
2689         int i = 0, j = 0;
2690         u32 tmp = 0;
2691
2692         u32 *register_list_format =
2693                 kmemdup(adev->gfx.rlc.register_list_format,
2694                         adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2695         if (!register_list_format)
2696                 return -ENOMEM;
2697
2698         /* setup unique_indirect_regs array and indirect_start_offsets array */
2699         unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2700         gfx_v9_1_parse_ind_reg_list(register_list_format,
2701                                     adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2702                                     adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2703                                     unique_indirect_regs,
2704                                     unique_indirect_reg_count,
2705                                     indirect_start_offsets,
2706                                     &indirect_start_offsets_count,
2707                                     ARRAY_SIZE(indirect_start_offsets));
2708
2709         /* enable auto-increment in case it is disabled */
2710         tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2711         tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2712         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2713
2714         /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2715         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2716                 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2717         for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2718                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2719                         adev->gfx.rlc.register_restore[i]);
2720
2721         /* load indirect register */
2722         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2723                 adev->gfx.rlc.reg_list_format_start);
2724
2725         /* direct register portion */
2726         for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2727                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2728                         register_list_format[i]);
2729
2730         /* indirect register portion */
2731         while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2732                 if (register_list_format[i] == 0xFFFFFFFF) {
2733                         WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2734                         continue;
2735                 }
2736
2737                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2738                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2739
2740                 for (j = 0; j < unique_indirect_reg_count; j++) {
2741                         if (register_list_format[i] == unique_indirect_regs[j]) {
2742                                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2743                                 break;
2744                         }
2745                 }
2746
2747                 BUG_ON(j >= unique_indirect_reg_count);
2748
2749                 i++;
2750         }
2751
2752         /* set save/restore list size */
2753         list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2754         list_size = list_size >> 1;
2755         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2756                 adev->gfx.rlc.reg_restore_list_size);
2757         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2758
2759         /* write the starting offsets to RLC scratch ram */
2760         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2761                 adev->gfx.rlc.starting_offsets_start);
2762         for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2763                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2764                        indirect_start_offsets[i]);
2765
2766         /* load unique indirect regs */
2767         for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2768                 if (unique_indirect_regs[i] != 0) {
2769                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2770                                + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2771                                unique_indirect_regs[i] & 0x3FFFF);
2772
2773                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2774                                + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2775                                unique_indirect_regs[i] >> 20);
2776                 }
2777         }
2778
2779         kfree(register_list_format);
2780         return 0;
2781 }
2782
2783 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2784 {
2785         WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2786 }
2787
2788 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2789                                              bool enable)
2790 {
2791         uint32_t data = 0;
2792         uint32_t default_data = 0;
2793
2794         default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2795         if (enable) {
2796                 /* enable GFXIP control over CGPG */
2797                 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2798                 if (default_data != data)
2799                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2800
2801                 /* update status */
2802                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2803                 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2804                 if (default_data != data)
2805                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2806         } else {
2807                 /* restore GFXIP control over CGPG */
2808                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2809                 if (default_data != data)
2810                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2811         }
2812 }
2813
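/*
 * One-time GFX power-gating setup: idle poll count for the CP wptr
 * poll, RLC power up/down and serdes command delays, the GRBM
 * register-save idle threshold that arms auto power gating, and
 * finally handing CGPG control over to GFXIP.
 */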
2814 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2815 {
2816         uint32_t data = 0;
2817
2818         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2819                               AMD_PG_SUPPORT_GFX_SMG |
2820                               AMD_PG_SUPPORT_GFX_DMG)) {
2821                 /* init IDLE_POLL_COUNT = 0x60 */
2822                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2823                 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2824                 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2825                 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2826
2827                 /* init RLC PG Delay */
2828                 data = 0;
2829                 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2830                 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2831                 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2832                 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2833                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2834
2835                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2836                 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2837                 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2838                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2839
2840                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2841                 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2842                 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2843                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2844
2845                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2846                 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2847
2848                 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2849                 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2850                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2851
2852                 pwr_10_0_gfxip_control_over_cgpg(adev, true);
2853         }
2854 }
2855
2856 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2857                                                 bool enable)
2858 {
2859         uint32_t data = 0;
2860         uint32_t default_data = 0;
2861
2862         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2863         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2864                              SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2865                              enable ? 1 : 0);
2866         if (default_data != data)
2867                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2868 }
2869
2870 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2871                                                 bool enable)
2872 {
2873         uint32_t data = 0;
2874         uint32_t default_data = 0;
2875
2876         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2877         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2878                              SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2879                              enable ? 1 : 0);
2880         if (default_data != data)
2881                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2882 }
2883
2884 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2885                                         bool enable)
2886 {
2887         uint32_t data = 0;
2888         uint32_t default_data = 0;
2889
2890         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2891         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2892                              CP_PG_DISABLE,
2893                              enable ? 0 : 1);
2894         if (default_data != data)
2895                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2896 }
2897
2898 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2899                                                 bool enable)
2900 {
2901         uint32_t data, default_data;
2902
2903         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2904         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2905                              GFX_POWER_GATING_ENABLE,
2906                              enable ? 1 : 0);
2907         if (default_data != data)
2908                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2909 }
2910
2911 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2912                                                 bool enable)
2913 {
2914         uint32_t data, default_data;
2915
2916         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2917         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2918                              GFX_PIPELINE_PG_ENABLE,
2919                              enable ? 1 : 0);
2920         if (default_data != data)
2921                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2922
2923         if (!enable)
2924                 /* read any GFX register to wake up GFX */
2925                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2926 }
2927
2928 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2929                                                        bool enable)
2930 {
2931         uint32_t data, default_data;
2932
2933         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2934         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2935                              STATIC_PER_CU_PG_ENABLE,
2936                              enable ? 1 : 0);
2937         if (default_data != data)
2938                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2939 }
2940
2941 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2942                                                 bool enable)
2943 {
2944         uint32_t data, default_data;
2945
2946         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2947         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2948                              DYN_PER_CU_PG_ENABLE,
2949                              enable ? 1 : 0);
2950         if (default_data != data)
2951                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2952 }
2953
2954 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2955 {
2956         gfx_v9_0_init_csb(adev);
2957
2958         /*
2959          * The RLC save/restore list is usable from v2_1 onward,
2960          * and it is required by the gfxoff feature.
2961          */
2962         if (adev->gfx.rlc.is_rlc_v2_1) {
2963                 if (adev->asic_type == CHIP_VEGA12 ||
2964                     (adev->asic_type == CHIP_RAVEN &&
2965                      adev->rev_id >= 8))
2966                         gfx_v9_1_init_rlc_save_restore_list(adev);
2967                 gfx_v9_0_enable_save_restore_machine(adev);
2968         }
2969
2970         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2971                               AMD_PG_SUPPORT_GFX_SMG |
2972                               AMD_PG_SUPPORT_GFX_DMG |
2973                               AMD_PG_SUPPORT_CP |
2974                               AMD_PG_SUPPORT_GDS |
2975                               AMD_PG_SUPPORT_RLC_SMU_HS)) {
2976                 WREG32(mmRLC_JUMP_TABLE_RESTORE,
2977                        adev->gfx.rlc.cp_table_gpu_addr >> 8);
2978                 gfx_v9_0_init_gfx_power_gating(adev);
2979         }
2980 }
2981
2982 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2983 {
2984         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
2985         gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2986         gfx_v9_0_wait_for_rlc_serdes(adev);
2987 }
2988
2989 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2990 {
2991         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2992         udelay(50);
2993         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2994         udelay(50);
2995 }
2996
2997 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2998 {
2999 #ifdef AMDGPU_RLC_DEBUG_RETRY
3000         u32 rlc_ucode_ver;
3001 #endif
3002
3003         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
3004         udelay(50);
3005
3006         /* carrizo enables the cp interrupt after cp is initialized */
3007         if (!(adev->flags & AMD_IS_APU)) {
3008                 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3009                 udelay(50);
3010         }
3011
3012 #ifdef AMDGPU_RLC_DEBUG_RETRY
3013         /* RLC_GPM_GENERAL_6 : RLC Ucode version */
3014         rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
3015         if (rlc_ucode_ver == 0x108) {
3016                 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
3017                                 rlc_ucode_ver, adev->gfx.rlc_fw_version);
3018                 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
3019                  * default is 0x9C4 to create a 100us interval */
3020                 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
3021                 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
3022                  * to disable the page fault retry interrupts, default is
3023                  * 0x100 (256) */
3024                 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
3025         }
3026 #endif
3027 }
3028
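/*
 * Legacy (non-PSP) RLC microcode load: RLC_GPM_UCODE_ADDR auto-increments
 * on each data write, so it is set once to the start address, the image
 * is streamed through RLC_GPM_UCODE_DATA, and the firmware version is
 * written back to the ADDR register at the end.
 */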
3029 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
3030 {
3031         const struct rlc_firmware_header_v2_0 *hdr;
3032         const __le32 *fw_data;
3033         unsigned i, fw_size;
3034
3035         if (!adev->gfx.rlc_fw)
3036                 return -EINVAL;
3037
3038         hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
3039         amdgpu_ucode_print_rlc_hdr(&hdr->header);
3040
3041         fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
3042                            le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3043         fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3044
3045         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
3046                         RLCG_UCODE_LOADING_START_ADDRESS);
3047         for (i = 0; i < fw_size; i++)
3048                 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3049         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3050
3051         return 0;
3052 }
3053
3054 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
3055 {
3056         int r;
3057
3058         if (amdgpu_sriov_vf(adev)) {
3059                 gfx_v9_0_init_csb(adev);
3060                 return 0;
3061         }
3062
3063         adev->gfx.rlc.funcs->stop(adev);
3064
3065         /* disable CG */
3066         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
3067
3068         gfx_v9_0_init_pg(adev);
3069
3070         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3071                 /* legacy rlc firmware loading */
3072                 r = gfx_v9_0_rlc_load_microcode(adev);
3073                 if (r)
3074                         return r;
3075         }
3076
3077         switch (adev->asic_type) {
3078         case CHIP_RAVEN:
3079                 if (amdgpu_lbpw == 0)
3080                         gfx_v9_0_enable_lbpw(adev, false);
3081                 else
3082                         gfx_v9_0_enable_lbpw(adev, true);
3083                 break;
3084         case CHIP_VEGA20:
3085                 if (amdgpu_lbpw > 0)
3086                         gfx_v9_0_enable_lbpw(adev, true);
3087                 else
3088                         gfx_v9_0_enable_lbpw(adev, false);
3089                 break;
3090         default:
3091                 break;
3092         }
3093
3094         adev->gfx.rlc.funcs->start(adev);
3095
3096         return 0;
3097 }
3098
3099 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3100 {
3101         int i;
3102         u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
3103
3104         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3105         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3106         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
3107         if (!enable) {
3108                 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3109                         adev->gfx.gfx_ring[i].sched.ready = false;
3110         }
3111         WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
3112         udelay(50);
3113 }
3114
3115 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3116 {
3117         const struct gfx_firmware_header_v1_0 *pfp_hdr;
3118         const struct gfx_firmware_header_v1_0 *ce_hdr;
3119         const struct gfx_firmware_header_v1_0 *me_hdr;
3120         const __le32 *fw_data;
3121         unsigned i, fw_size;
3122
3123         if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
3124                 return -EINVAL;
3125
3126         pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3127                 adev->gfx.pfp_fw->data;
3128         ce_hdr = (const struct gfx_firmware_header_v1_0 *)
3129                 adev->gfx.ce_fw->data;
3130         me_hdr = (const struct gfx_firmware_header_v1_0 *)
3131                 adev->gfx.me_fw->data;
3132
3133         amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3134         amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
3135         amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3136
3137         gfx_v9_0_cp_gfx_enable(adev, false);
3138
3139         /* PFP */
3140         fw_data = (const __le32 *)
3141                 (adev->gfx.pfp_fw->data +
3142                  le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3143         fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3144         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
3145         for (i = 0; i < fw_size; i++)
3146                 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3147         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3148
3149         /* CE */
3150         fw_data = (const __le32 *)
3151                 (adev->gfx.ce_fw->data +
3152                  le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3153         fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3154         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
3155         for (i = 0; i < fw_size; i++)
3156                 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3157         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
3158
3159         /* ME */
3160         fw_data = (const __le32 *)
3161                 (adev->gfx.me_fw->data +
3162                  le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3163         fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3164         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
3165         for (i = 0; i < fw_size; i++)
3166                 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3167         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
3168
3169         return 0;
3170 }
3171
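/*
 * Prime the gfx ring with the clear-state preamble: context control,
 * the golden SECT_CONTEXT register values from gfx9_cs_data, a
 * CLEAR_STATE packet, and the CE partition bases.
 */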
3172 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
3173 {
3174         struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
3175         const struct cs_section_def *sect = NULL;
3176         const struct cs_extent_def *ext = NULL;
3177         int r, i, tmp;
3178
3179         /* init the CP */
3180         WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
3181         WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
3182
3183         gfx_v9_0_cp_gfx_enable(adev, true);
3184
3185         r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
3186         if (r) {
3187                 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3188                 return r;
3189         }
3190
3191         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3192         amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3193
3194         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3195         amdgpu_ring_write(ring, 0x80000000);
3196         amdgpu_ring_write(ring, 0x80000000);
3197
3198         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
3199                 for (ext = sect->section; ext->extent != NULL; ++ext) {
3200                         if (sect->id == SECT_CONTEXT) {
3201                                 amdgpu_ring_write(ring,
3202                                        PACKET3(PACKET3_SET_CONTEXT_REG,
3203                                                ext->reg_count));
3204                                 amdgpu_ring_write(ring,
3205                                        ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3206                                 for (i = 0; i < ext->reg_count; i++)
3207                                         amdgpu_ring_write(ring, ext->extent[i]);
3208                         }
3209                 }
3210         }
3211
3212         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3213         amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3214
3215         amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3216         amdgpu_ring_write(ring, 0);
3217
3218         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3219         amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3220         amdgpu_ring_write(ring, 0x8000);
3221         amdgpu_ring_write(ring, 0x8000);
3222
3223         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3224         tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
3225                 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
3226         amdgpu_ring_write(ring, tmp);
3227         amdgpu_ring_write(ring, 0);
3228
3229         amdgpu_ring_commit(ring);
3230
3231         return 0;
3232 }
3233
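/*
 * Program the gfx ring registers (CP_RB0_*): buffer size, rptr/wptr
 * writeback addresses, ring base and the doorbell window, then start
 * the ring via gfx_v9_0_cp_gfx_start().
 */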
3234 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
3235 {
3236         struct amdgpu_ring *ring;
3237         u32 tmp;
3238         u32 rb_bufsz;
3239         u64 rb_addr, rptr_addr, wptr_gpu_addr;
3240
3241         /* Set the write pointer delay */
3242         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
3243
3244         /* set the RB to use vmid 0 */
3245         WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
3246
3247         /* Set ring buffer size */
3248         ring = &adev->gfx.gfx_ring[0];
3249         rb_bufsz = order_base_2(ring->ring_size / 8);
3250         tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3251         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3252 #ifdef __BIG_ENDIAN
3253         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3254 #endif
3255         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3256
3257         /* Initialize the ring buffer's write pointers */
3258         ring->wptr = 0;
3259         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3260         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3261
3262         /* set the wb address whether it's enabled or not */
3263         rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3264         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3265         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3266
3267         wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3268         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
3269         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
3270
3271         mdelay(1);
3272         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3273
3274         rb_addr = ring->gpu_addr >> 8;
3275         WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
3276         WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3277
3278         tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3279         if (ring->use_doorbell) {
3280                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3281                                     DOORBELL_OFFSET, ring->doorbell_index);
3282                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3283                                     DOORBELL_EN, 1);
3284         } else {
3285                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
3286         }
3287         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
3288
3289         tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3290                         DOORBELL_RANGE_LOWER, ring->doorbell_index);
3291         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3292
3293         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
3294                        CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3295
3297         /* start the ring */
3298         gfx_v9_0_cp_gfx_start(adev);
3299         ring->sched.ready = true;
3300
3301         return 0;
3302 }
3303
3304 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3305 {
3306         int i;
3307
3308         if (enable) {
3309                 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
3310         } else {
3311                 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
3312                         (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3313                 for (i = 0; i < adev->gfx.num_compute_rings; i++)
3314                         adev->gfx.compute_ring[i].sched.ready = false;
3315                 adev->gfx.kiq.ring.sched.ready = false;
3316         }
3317         udelay(50);
3318 }
3319
3320 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3321 {
3322         const struct gfx_firmware_header_v1_0 *mec_hdr;
3323         const __le32 *fw_data;
3324         unsigned i;
3325         u32 tmp;
3326
3327         if (!adev->gfx.mec_fw)
3328                 return -EINVAL;
3329
3330         gfx_v9_0_cp_compute_enable(adev, false);
3331
3332         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3333         amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3334
3335         fw_data = (const __le32 *)
3336                 (adev->gfx.mec_fw->data +
3337                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3338         tmp = 0;
3339         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3340         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3341         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
3342
3343         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
3344                 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
3345         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
3346                 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3347
3348         /* MEC1 */
3349         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3350                          mec_hdr->jt_offset);
3351         for (i = 0; i < mec_hdr->jt_size; i++)
3352                 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
3353                         le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3354
3355         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3356                         adev->gfx.mec_fw_version);
3357         /* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
3358
3359         return 0;
3360 }
3361
3362 /* KIQ functions */
3363 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
3364 {
3365         uint32_t tmp;
3366         struct amdgpu_device *adev = ring->adev;
3367
3368         /* tell RLC which queue is the KIQ queue */
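        /*
         * The low byte encodes the queue: bits 2:0 queue, 4:3 pipe,
         * 6:5 me, and bit 7 is the enable. The value is written once
         * without the enable bit and then again with it set.
         */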
3369         tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
3370         tmp &= 0xffffff00;
3371         tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3372         WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3373         tmp |= 0x80;
3374         WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3375 }
3376
3377 static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
3378 {
3379         struct amdgpu_device *adev = ring->adev;
3380
3381         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3382                 if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
3383                         mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
3384                         ring->has_high_prio = true;
3385                         mqd->cp_hqd_queue_priority =
3386                                 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
3387                 } else {
3388                         ring->has_high_prio = false;
3389                 }
3390         }
3391 }
3392
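/*
 * Fill the MQD (memory queue descriptor) from scratch: CU thread-mgmt
 * masks, EOP buffer, doorbell, MQD/ring base addresses and queue
 * control words. The CP consumes this structure when the queue is
 * mapped, so everything the CP_HQD_* registers would hold is staged
 * here first.
 */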
3393 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
3394 {
3395         struct amdgpu_device *adev = ring->adev;
3396         struct v9_mqd *mqd = ring->mqd_ptr;
3397         uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3398         uint32_t tmp;
3399
3400         mqd->header = 0xC0310800;
3401         mqd->compute_pipelinestat_enable = 0x00000001;
3402         mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3403         mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3404         mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3405         mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3406         mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
3407         mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
3408         mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
3409         mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
3410         mqd->compute_misc_reserved = 0x00000003;
3411
3412         mqd->dynamic_cu_mask_addr_lo =
3413                 lower_32_bits(ring->mqd_gpu_addr
3414                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3415         mqd->dynamic_cu_mask_addr_hi =
3416                 upper_32_bits(ring->mqd_gpu_addr
3417                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3418
3419         eop_base_addr = ring->eop_gpu_addr >> 8;
3420         mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3421         mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3422
3423         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3424         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3425         tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3426                         (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
3427
3428         mqd->cp_hqd_eop_control = tmp;
3429
3430         /* enable doorbell? */
3431         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3432
3433         if (ring->use_doorbell) {
3434                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3435                                     DOORBELL_OFFSET, ring->doorbell_index);
3436                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3437                                     DOORBELL_EN, 1);
3438                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3439                                     DOORBELL_SOURCE, 0);
3440                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3441                                     DOORBELL_HIT, 0);
3442         } else {
3443                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3444                                          DOORBELL_EN, 0);
3445         }
3446
3447         mqd->cp_hqd_pq_doorbell_control = tmp;
3448
3449         /* disable the queue if it's active */
3450         ring->wptr = 0;
3451         mqd->cp_hqd_dequeue_request = 0;
3452         mqd->cp_hqd_pq_rptr = 0;
3453         mqd->cp_hqd_pq_wptr_lo = 0;
3454         mqd->cp_hqd_pq_wptr_hi = 0;
3455
3456         /* set the pointer to the MQD */
3457         mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3458         mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3459
3460         /* set MQD vmid to 0 */
3461         tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3462         tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3463         mqd->cp_mqd_control = tmp;
3464
3465         /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3466         hqd_gpu_addr = ring->gpu_addr >> 8;
3467         mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3468         mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3469
3470         /* set up the HQD, this is similar to CP_RB0_CNTL */
3471         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3472         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3473                             (order_base_2(ring->ring_size / 4) - 1));
3474         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3475                         ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
3476 #ifdef __BIG_ENDIAN
3477         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3478 #endif
3479         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3480         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
3481         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3482         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3483         mqd->cp_hqd_pq_control = tmp;
3484
3485         /* set the wb address whether it's enabled or not */
3486         wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3487         mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3488         mqd->cp_hqd_pq_rptr_report_addr_hi =
3489                 upper_32_bits(wb_gpu_addr) & 0xffff;
3490
3491         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3492         wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3493         mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3494         mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3495
3496         tmp = 0;
3497         /* enable the doorbell if requested */
3498         if (ring->use_doorbell) {
3499                 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3500                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3501                                 DOORBELL_OFFSET, ring->doorbell_index);
3502
3503                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3504                                          DOORBELL_EN, 1);
3505                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3506                                          DOORBELL_SOURCE, 0);
3507                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3508                                          DOORBELL_HIT, 0);
3509         }
3510
3511         mqd->cp_hqd_pq_doorbell_control = tmp;
3512
3513         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3514         ring->wptr = 0;
3515         mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3516
3517         /* set the vmid for the queue */
3518         mqd->cp_hqd_vmid = 0;
3519
3520         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3521         tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3522         mqd->cp_hqd_persistent_state = tmp;
3523
3524         /* set MIN_IB_AVAIL_SIZE */
3525         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3526         tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3527         mqd->cp_hqd_ib_control = tmp;
3528
3529         /* set static priority for a queue/ring */
3530         gfx_v9_0_mqd_set_priority(ring, mqd);
3531         mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
3532
3533         /* the map_queues packet doesn't need to activate the queue,
3534          * so only the KIQ needs to set this field.
3535          */
3536         if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
3537                 mqd->cp_hqd_active = 1;
3538
3539         return 0;
3540 }
3541
3542 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
3543 {
3544         struct amdgpu_device *adev = ring->adev;
3545         struct v9_mqd *mqd = ring->mqd_ptr;
3546         int j;
3547
3548         /* disable wptr polling */
3549         WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3550
3551         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3552                mqd->cp_hqd_eop_base_addr_lo);
3553         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3554                mqd->cp_hqd_eop_base_addr_hi);
3555
3556         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3557         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
3558                mqd->cp_hqd_eop_control);
3559
3560         /* enable doorbell? */
3561         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3562                mqd->cp_hqd_pq_doorbell_control);
3563
3564         /* disable the queue if it's active */
3565         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3566                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3567                 for (j = 0; j < adev->usec_timeout; j++) {
3568                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3569                                 break;
3570                         udelay(1);
3571                 }
3572                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3573                        mqd->cp_hqd_dequeue_request);
3574                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
3575                        mqd->cp_hqd_pq_rptr);
3576                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3577                        mqd->cp_hqd_pq_wptr_lo);
3578                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3579                        mqd->cp_hqd_pq_wptr_hi);
3580         }
3581
3582         /* set the pointer to the MQD */
3583         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
3584                mqd->cp_mqd_base_addr_lo);
3585         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3586                mqd->cp_mqd_base_addr_hi);
3587
3588         /* set MQD vmid to 0 */
3589         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
3590                mqd->cp_mqd_control);
3591
3592         /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3593         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
3594                mqd->cp_hqd_pq_base_lo);
3595         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
3596                mqd->cp_hqd_pq_base_hi);
3597
3598         /* set up the HQD, this is similar to CP_RB0_CNTL */
3599         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
3600                mqd->cp_hqd_pq_control);
3601
3602         /* set the wb address whether it's enabled or not */
3603         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3604                                 mqd->cp_hqd_pq_rptr_report_addr_lo);
3605         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3606                                 mqd->cp_hqd_pq_rptr_report_addr_hi);
3607
3608         /* only used if CP_PQ_WPTR_POLL_CNTL.EN = 1 */
3609         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3610                mqd->cp_hqd_pq_wptr_poll_addr_lo);
3611         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3612                mqd->cp_hqd_pq_wptr_poll_addr_hi);
3613
3614         /* enable the doorbell if requested */
3615         if (ring->use_doorbell) {
3616                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3617                                         (adev->doorbell_index.kiq * 2) << 2);
3618                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3619                                         (adev->doorbell_index.userqueue_end * 2) << 2);
3620         }
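        /* as we read it (an assumption, not verified against the register
         * spec), the doorbell_index values above count 64-bit doorbell slots,
         * so "* 2" converts them to 32-bit dword units and "<< 2" to the byte
         * offsets the RANGE registers expect
         */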
3621
3622         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3623                mqd->cp_hqd_pq_doorbell_control);
3624
3625         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3626         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3627                mqd->cp_hqd_pq_wptr_lo);
3628         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3629                mqd->cp_hqd_pq_wptr_hi);
3630
3631         /* set the vmid for the queue */
3632         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3633
3634         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3635                mqd->cp_hqd_persistent_state);
3636
3637         /* activate the queue */
3638         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
3639                mqd->cp_hqd_active);
3640
3641         if (ring->use_doorbell)
3642                 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3643
3644         return 0;
3645 }
3646
3647 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3648 {
3649         struct amdgpu_device *adev = ring->adev;
3650         int j;
3651
3652         /* disable the queue if it's active */
3653         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3654
3655                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3656
3657                 for (j = 0; j < adev->usec_timeout; j++) {
3658                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3659                                 break;
3660                         udelay(1);
3661                 }
3662
3663                 if (j == adev->usec_timeout) {
3664                         DRM_DEBUG("KIQ dequeue request failed.\n");
3665
3666                         /* Manual disable if dequeue request times out */
3667                         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
3668                 }
3669
3670                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3671                       0);
3672         }
3673
3674         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3675         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3676         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3677         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3678         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3679         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3680         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3681         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3682
3683         return 0;
3684 }
3685
3686 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3687 {
3688         struct amdgpu_device *adev = ring->adev;
3689         struct v9_mqd *mqd = ring->mqd_ptr;
3690         int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3691
3692         gfx_v9_0_kiq_setting(ring);
3693
3694         if (adev->in_gpu_reset) { /* for GPU_RESET case */
3695                 /* reset MQD to a clean status */
3696                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3697                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3698
3699                 /* reset ring buffer */
3700                 ring->wptr = 0;
3701                 amdgpu_ring_clear_ring(ring);
3702
3703                 mutex_lock(&adev->srbm_mutex);
3704                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3705                 gfx_v9_0_kiq_init_register(ring);
3706                 soc15_grbm_select(adev, 0, 0, 0, 0);
3707                 mutex_unlock(&adev->srbm_mutex);
3708         } else {
3709                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3710                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3711                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3712                 mutex_lock(&adev->srbm_mutex);
3713                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3714                 gfx_v9_0_mqd_init(ring);
3715                 gfx_v9_0_kiq_init_register(ring);
3716                 soc15_grbm_select(adev, 0, 0, 0, 0);
3717                 mutex_unlock(&adev->srbm_mutex);
3718
3719                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3720                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3721         }
3722
3723         return 0;
3724 }
3725
3726 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3727 {
3728         struct amdgpu_device *adev = ring->adev;
3729         struct v9_mqd *mqd = ring->mqd_ptr;
3730         int mqd_idx = ring - &adev->gfx.compute_ring[0];
3731
3732         if (!adev->in_gpu_reset && !adev->in_suspend) {
3733                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3734                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3735                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3736                 mutex_lock(&adev->srbm_mutex);
3737                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3738                 gfx_v9_0_mqd_init(ring);
3739                 soc15_grbm_select(adev, 0, 0, 0, 0);
3740                 mutex_unlock(&adev->srbm_mutex);
3741
3742                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3743                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3744         } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3745                 /* reset MQD to a clean status */
3746                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3747                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3748
3749                 /* reset ring buffer */
3750                 ring->wptr = 0;
3751                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
3752                 amdgpu_ring_clear_ring(ring);
3753         } else {
3754                 amdgpu_ring_clear_ring(ring);
3755         }
3756
3757         return 0;
3758 }
3759
3760 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3761 {
3762         struct amdgpu_ring *ring;
3763         int r;
3764
3765         ring = &adev->gfx.kiq.ring;
3766
3767         r = amdgpu_bo_reserve(ring->mqd_obj, false);
3768         if (unlikely(r != 0))
3769                 return r;
3770
3771         r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3772         if (unlikely(r != 0))
3773                 return r;
3774
3775         gfx_v9_0_kiq_init_queue(ring);
3776         amdgpu_bo_kunmap(ring->mqd_obj);
3777         ring->mqd_ptr = NULL;
3778         amdgpu_bo_unreserve(ring->mqd_obj);
3779         ring->sched.ready = true;
3780         return 0;
3781 }
3782
3783 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3784 {
3785         struct amdgpu_ring *ring = NULL;
3786         int r = 0, i;
3787
3788         gfx_v9_0_cp_compute_enable(adev, true);
3789
3790         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3791                 ring = &adev->gfx.compute_ring[i];
3792
3793                 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3794                 if (unlikely(r != 0))
3795                         goto done;
3796                 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3797                 if (!r) {
3798                         r = gfx_v9_0_kcq_init_queue(ring);
3799                         amdgpu_bo_kunmap(ring->mqd_obj);
3800                         ring->mqd_ptr = NULL;
3801                 }
3802                 amdgpu_bo_unreserve(ring->mqd_obj);
3803                 if (r)
3804                         goto done;
3805         }
3806
3807         r = amdgpu_gfx_enable_kcq(adev);
3808 done:
3809         return r;
3810 }
3811
3812 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3813 {
3814         int r, i;
3815         struct amdgpu_ring *ring;
3816
3817         if (!(adev->flags & AMD_IS_APU))
3818                 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3819
3820         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3821                 if (adev->asic_type != CHIP_ARCTURUS) {
3822                         /* legacy firmware loading */
3823                         r = gfx_v9_0_cp_gfx_load_microcode(adev);
3824                         if (r)
3825                                 return r;
3826                 }
3827
3828                 r = gfx_v9_0_cp_compute_load_microcode(adev);
3829                 if (r)
3830                         return r;
3831         }
3832
3833         r = gfx_v9_0_kiq_resume(adev);
3834         if (r)
3835                 return r;
3836
3837         if (adev->asic_type != CHIP_ARCTURUS) {
3838                 r = gfx_v9_0_cp_gfx_resume(adev);
3839                 if (r)
3840                         return r;
3841         }
3842
3843         r = gfx_v9_0_kcq_resume(adev);
3844         if (r)
3845                 return r;
3846
3847         if (adev->asic_type != CHIP_ARCTURUS) {
3848                 ring = &adev->gfx.gfx_ring[0];
3849                 r = amdgpu_ring_test_helper(ring);
3850                 if (r)
3851                         return r;
3852         }
3853
3854         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3855                 ring = &adev->gfx.compute_ring[i];
3856                 amdgpu_ring_test_helper(ring);
3857         }
3858
3859         gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3860
3861         return 0;
3862 }
3863
3864 static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
3865 {
3866         u32 tmp;
3867
3868         if (adev->asic_type != CHIP_ARCTURUS)
3869                 return;
3870
3871         tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
3872         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
3873                                 adev->df.hash_status.hash_64k);
3874         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
3875                                 adev->df.hash_status.hash_2m);
3876         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
3877                                 adev->df.hash_status.hash_1g);
3878         WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
3879 }
3880
3881 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3882 {
3883         if (adev->asic_type != CHIP_ARCTURUS)
3884                 gfx_v9_0_cp_gfx_enable(adev, enable);
3885         gfx_v9_0_cp_compute_enable(adev, enable);
3886 }
3887
3888 static int gfx_v9_0_hw_init(void *handle)
3889 {
3890         int r;
3891         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3892
3893         if (!amdgpu_sriov_vf(adev))
3894                 gfx_v9_0_init_golden_registers(adev);
3895
3896         gfx_v9_0_constants_init(adev);
3897
3898         gfx_v9_0_init_tcp_config(adev);
3899
3900         r = adev->gfx.rlc.funcs->resume(adev);
3901         if (r)
3902                 return r;
3903
3904         r = gfx_v9_0_cp_resume(adev);
3905         if (r)
3906                 return r;
3907
3908         return r;
3909 }
3910
3911 static int gfx_v9_0_hw_fini(void *handle)
3912 {
3913         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3914
3915         amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
3916         amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3917         amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3918
3919         /* DF freeze and kcq disable will fail if a RAS interrupt was triggered */
3920         if (!amdgpu_ras_intr_triggered())
3921                 /* disable KCQ so the CPC stops touching memory that may no longer be valid */
3922                 amdgpu_gfx_disable_kcq(adev);
3923
3924         if (amdgpu_sriov_vf(adev)) {
3925                 gfx_v9_0_cp_gfx_enable(adev, false);
3926                 /* must disable polling for SRIOV when hw is finished, otherwise
3927                  * the CPC engine may keep fetching a WB address that is no longer
3928                  * valid after sw is finished, triggering a DMAR read error on the
3929                  * hypervisor side.
3930                  */
3931                 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3932                 return 0;
3933         }
3934
3935         /* Use the deinitialize sequence from CAIL when unbinding the device
3936          * from the driver, otherwise the KIQ hangs when binding it back.
3937          */
3938         if (!adev->in_gpu_reset && !adev->in_suspend) {
3939                 mutex_lock(&adev->srbm_mutex);
3940                 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
3941                                 adev->gfx.kiq.ring.pipe,
3942                                 adev->gfx.kiq.ring.queue, 0);
3943                 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
3944                 soc15_grbm_select(adev, 0, 0, 0, 0);
3945                 mutex_unlock(&adev->srbm_mutex);
3946         }
3947
3948         gfx_v9_0_cp_enable(adev, false);
3949         adev->gfx.rlc.funcs->stop(adev);
3950
3951         return 0;
3952 }
3953
3954 static int gfx_v9_0_suspend(void *handle)
3955 {
3956         return gfx_v9_0_hw_fini(handle);
3957 }
3958
3959 static int gfx_v9_0_resume(void *handle)
3960 {
3961         return gfx_v9_0_hw_init(handle);
3962 }
3963
3964 static bool gfx_v9_0_is_idle(void *handle)
3965 {
3966         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3967
3968         return !REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3969                               GRBM_STATUS, GUI_ACTIVE);
3973 }
3974
3975 static int gfx_v9_0_wait_for_idle(void *handle)
3976 {
3977         unsigned i;
3978         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3979
3980         for (i = 0; i < adev->usec_timeout; i++) {
3981                 if (gfx_v9_0_is_idle(handle))
3982                         return 0;
3983                 udelay(1);
3984         }
3985         return -ETIMEDOUT;
3986 }
3987
3988 static int gfx_v9_0_soft_reset(void *handle)
3989 {
3990         u32 grbm_soft_reset = 0;
3991         u32 tmp;
3992         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3993
3994         /* GRBM_STATUS */
3995         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3996         if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3997                    GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3998                    GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
3999                    GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4000                    GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4001                    GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
4002                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4003                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4004                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4005                                                 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4006         }
4007
4008         if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4009                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4010                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4011         }
4012
4013         /* GRBM_STATUS2 */
4014         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
4015         if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4016                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4017                                                 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4018
4019
4020         if (grbm_soft_reset) {
4021                 /* stop the rlc */
4022                 adev->gfx.rlc.funcs->stop(adev);
4023
4024                 if (adev->asic_type != CHIP_ARCTURUS)
4025                         /* Disable GFX parsing/prefetching */
4026                         gfx_v9_0_cp_gfx_enable(adev, false);
4027
4028                 /* Disable MEC parsing/prefetching */
4029                 gfx_v9_0_cp_compute_enable(adev, false);
4030
4031                 /* grbm_soft_reset is already known to be non-zero here */
4032                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4033                 tmp |= grbm_soft_reset;
4034                 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4035                 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4036                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4037
4038                 udelay(50);
4039
4040                 tmp &= ~grbm_soft_reset;
4041                 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4042                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4044
4045                 /* Wait a little for things to settle down */
4046                 udelay(50);
4047         }
4048         return 0;
4049 }
4050
4051 static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
4052 {
4053         signed long r, cnt = 0;
4054         unsigned long flags;
4055         uint32_t seq;
4056         struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4057         struct amdgpu_ring *ring = &kiq->ring;
4058
4059         BUG_ON(!ring->funcs->emit_rreg);
4060
4061         spin_lock_irqsave(&kiq->ring_lock, flags);
4062         amdgpu_ring_alloc(ring, 32);
4063         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4064         amdgpu_ring_write(ring, 9 |     /* src: register */
4065                                 (5 << 8) |      /* dst: memory */
4066                                 (1 << 16) |     /* count sel */
4067                                 (1 << 20));     /* write confirm */
4068         amdgpu_ring_write(ring, 0);
4069         amdgpu_ring_write(ring, 0);
4070         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4071                                 kiq->reg_val_offs * 4));
4072         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4073                                 kiq->reg_val_offs * 4));
4074         amdgpu_fence_emit_polling(ring, &seq);
4075         amdgpu_ring_commit(ring);
4076         spin_unlock_irqrestore(&kiq->ring_lock, flags);
4077
4078         r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4079
4080         /* don't wait any longer in the gpu reset case, because doing so may
4081          * block the gpu_recover() routine forever; e.g. when this virt_kiq_rreg
4082          * is triggered from TTM, ttm_bo_lock_delayed_workqueue() never returns
4083          * while we keep waiting in virt_kiq_rreg, which makes gpu_recover()
4084          * hang there.
4085          *
4086          * also don't wait any longer in IRQ context
4087          */
4088         if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
4089                 goto failed_kiq_read;
4090
4091         might_sleep();
4092         while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
4093                 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
4094                 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4095         }
4096
4097         if (cnt > MAX_KIQ_REG_TRY)
4098                 goto failed_kiq_read;
4099
4100         return (uint64_t)adev->wb.wb[kiq->reg_val_offs] |
4101                 (uint64_t)adev->wb.wb[kiq->reg_val_offs + 1] << 32ULL;
4102
4103 failed_kiq_read:
4104         pr_err("failed to read gpu clock\n");
4105         return ~0;
4106 }
4107
4108 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4109 {
4110         uint64_t clock;
4111
4112         amdgpu_gfx_off_ctrl(adev, false);
4113         mutex_lock(&adev->gfx.gpu_clock_mutex);
4114         if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
4115                 clock = gfx_v9_0_kiq_read_clock(adev);
4116         } else {
4117                 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4118                 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
4119                         ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4120         }
4121         mutex_unlock(&adev->gfx.gpu_clock_mutex);
4122         amdgpu_gfx_off_ctrl(adev, true);
4123         return clock;
4124 }
4125
4126 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4127                                           uint32_t vmid,
4128                                           uint32_t gds_base, uint32_t gds_size,
4129                                           uint32_t gws_base, uint32_t gws_size,
4130                                           uint32_t oa_base, uint32_t oa_size)
4131 {
4132         struct amdgpu_device *adev = ring->adev;
4133
4134         /* GDS Base */
4135         gfx_v9_0_write_data_to_reg(ring, 0, false,
4136                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
4137                                    gds_base);
4138
4139         /* GDS Size */
4140         gfx_v9_0_write_data_to_reg(ring, 0, false,
4141                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
4142                                    gds_size);
4143
4144         /* GWS */
4145         gfx_v9_0_write_data_to_reg(ring, 0, false,
4146                                    SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
4147                                    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4148
4149         /* OA */
4150         gfx_v9_0_write_data_to_reg(ring, 0, false,
4151                                    SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
4152                                    (1 << (oa_size + oa_base)) - (1 << oa_base));
4153 }
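/* OA mask worked example for the computation above: with oa_base = 4 and
 * oa_size = 4, (1 << (4 + 4)) - (1 << 4) = 0x100 - 0x10 = 0xf0, i.e. a
 * contiguous mask of oa_size bits starting at bit oa_base.
 */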
4154
4155 static const u32 vgpr_init_compute_shader[] =
4156 {
4157         0xb07c0000, 0xbe8000ff,
4158         0x000000f8, 0xbf110800,
4159         0x7e000280, 0x7e020280,
4160         0x7e040280, 0x7e060280,
4161         0x7e080280, 0x7e0a0280,
4162         0x7e0c0280, 0x7e0e0280,
4163         0x80808800, 0xbe803200,
4164         0xbf84fff5, 0xbf9c0000,
4165         0xd28c0001, 0x0001007f,
4166         0xd28d0001, 0x0002027e,
4167         0x10020288, 0xb8810904,
4168         0xb7814000, 0xd1196a01,
4169         0x00000301, 0xbe800087,
4170         0xbefc00c1, 0xd89c4000,
4171         0x00020201, 0xd89cc080,
4172         0x00040401, 0x320202ff,
4173         0x00000800, 0x80808100,
4174         0xbf84fff8, 0x7e020280,
4175         0xbf810000, 0x00000000,
4176 };
4177
4178 static const u32 sgpr_init_compute_shader[] =
4179 {
4180         0xb07c0000, 0xbe8000ff,
4181         0x0000005f, 0xbee50080,
4182         0xbe812c65, 0xbe822c65,
4183         0xbe832c65, 0xbe842c65,
4184         0xbe852c65, 0xb77c0005,
4185         0x80808500, 0xbf84fff8,
4186         0xbe800080, 0xbf810000,
4187 };
4188
4189 static const u32 vgpr_init_compute_shader_arcturus[] = {
4190         0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
4191         0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
4192         0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
4193         0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
4194         0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
4195         0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
4196         0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
4197         0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
4198         0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
4199         0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
4200         0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
4201         0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
4202         0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
4203         0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
4204         0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
4205         0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
4206         0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
4207         0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
4208         0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
4209         0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
4210         0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
4211         0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
4212         0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
4213         0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
4214         0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
4215         0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
4216         0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
4217         0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
4218         0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
4219         0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
4220         0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
4221         0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
4222         0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
4223         0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
4224         0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
4225         0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
4226         0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
4227         0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
4228         0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
4229         0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
4230         0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
4231         0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
4232         0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
4233         0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
4234         0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
4235         0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
4236         0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
4237         0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
4238         0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
4239         0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
4240         0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
4241         0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
4242         0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
4243         0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
4244         0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
4245         0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
4246         0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
4247         0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
4248         0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
4249         0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
4250         0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
4251         0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
4252         0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
4253         0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
4254         0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
4255         0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
4256         0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
4257         0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
4258         0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
4259         0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
4260         0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
4261         0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
4262         0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
4263         0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
4264         0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
4265         0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
4266         0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
4267         0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
4268         0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
4269         0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
4270         0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
4271         0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
4272         0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
4273         0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
4274         0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
4275         0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
4276         0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
4277         0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
4278         0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
4279         0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
4280         0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
4281         0xbf84fff8, 0xbf810000,
4282 };
4283
4284 /* When the register arrays below are changed, please update gpr_reg_size and
4285  * sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds to cover all
4286  * gfx9 ASICs. */
4287 static const struct soc15_reg_entry vgpr_init_regs[] = {
4288    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4289    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4290    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4291    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4292    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
4293    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4294    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4295    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4296    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4297    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4298    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4299    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4300    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4301    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4302 };
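/* A rough reading of the VGPR dispatch state above (an interpretation, not
 * taken from the register spec): NUM_THREAD_X/Y/Z = 64 x 4 x 1 gives 256
 * threads per threadgroup, i.e. four wave64s, and PGM_RSRC1 = 0x3f requests
 * the maximum VGPR allocation ((0x3f + 1) * 4 = 256 VGPRs) so the init
 * shader can touch the whole VGPR file.
 */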
4303
4304 static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
4305    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4306    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4307    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4308    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4309    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
4310    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4311    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4312    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4313    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4314    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4315    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4316    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4317    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4318    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4319 };
4320
4321 static const struct soc15_reg_entry sgpr1_init_regs[] = {
4322    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4323    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4324    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4325    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4326    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4327    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4328    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
4329    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
4330    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
4331    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
4332    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
4333    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
4334    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
4335    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
4336 };
4337
4338 static const struct soc15_reg_entry sgpr2_init_regs[] = {
4339    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4340    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4341    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4342    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4343    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4344    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4345    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
4346    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
4347    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
4348    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
4349    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
4350    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
4351    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
4352    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
4353 };
4354
4355 static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
4356    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
4357    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
4358    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
4359    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
4360    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
4361    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
4362    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
4363    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
4364    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
4365    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
4366    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
4367    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
4368    { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
4369    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
4370    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
4371    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
4372    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
4373    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
4374    { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
4375    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
4376    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
4377    { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
4378    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
4379    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
4380    { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
4381    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
4382    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
4383    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
4384    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
4385    { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
4386    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
4387    { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
4388    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
4389 };
4390
4391 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
4392 {
4393         struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4394         int i, r;
4395
4396         /* only supported when RAS is enabled */
4397         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4398                 return 0;
4399
4400         r = amdgpu_ring_alloc(ring, 7);
4401         if (r) {
4402                 DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
4403                         ring->name, r);
4404                 return r;
4405         }
4406
4407         WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
4408         WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
4409
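        /* a DMA_DATA packet that clears the whole GDS; as we read the packet
         * encoding, DST_SEL(1) targets GDS, SRC_SEL(2) takes the source from
         * immediate data (the zero dword below), CP_SYNC serializes against
         * prior CP work, and the last dword carries the byte count plus
         * RAW_WAIT
         */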
4410         amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
4411         amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
4412                                 PACKET3_DMA_DATA_DST_SEL(1) |
4413                                 PACKET3_DMA_DATA_SRC_SEL(2) |
4414                                 PACKET3_DMA_DATA_ENGINE(0)));
4415         amdgpu_ring_write(ring, 0);
4416         amdgpu_ring_write(ring, 0);
4417         amdgpu_ring_write(ring, 0);
4418         amdgpu_ring_write(ring, 0);
4419         amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
4420                                 adev->gds.gds_size);
4421
4422         amdgpu_ring_commit(ring);
4423
4424         for (i = 0; i < adev->usec_timeout; i++) {
4425                 if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
4426                         break;
4427                 udelay(1);
4428         }
4429
4430         if (i >= adev->usec_timeout)
4431                 r = -ETIMEDOUT;
4432
4433         WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
4434
4435         return r;
4436 }
4437
4438 static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
4439 {
4440         struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4441         struct amdgpu_ib ib;
4442         struct dma_fence *f = NULL;
4443         int r, i;
4444         unsigned total_size, vgpr_offset, sgpr_offset;
4445         u64 gpu_addr;
4446
4447         int compute_dim_x = adev->gfx.config.max_shader_engines *
4448                                                 adev->gfx.config.max_cu_per_sh *
4449                                                 adev->gfx.config.max_sh_per_se;
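        /* compute_dim_x is the total CU count (SEs * SHs per SE * CUs per SH),
         * e.g. 4 * 1 * 16 = 64 on a 64-CU part; the dispatches below scale
         * their x dimension from it so that every CU is covered
         */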
4450         int sgpr_work_group_size = 5;
4451         int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
4452         int vgpr_init_shader_size;
4453         const u32 *vgpr_init_shader_ptr;
4454         const struct soc15_reg_entry *vgpr_init_regs_ptr;
4455
4456         /* only supported when RAS is enabled */
4457         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4458                 return 0;
4459
4460         /* bail if the compute ring is not ready */
4461         if (!ring->sched.ready)
4462                 return 0;
4463
4464         if (adev->asic_type == CHIP_ARCTURUS) {
4465                 vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
4466                 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
4467                 vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
4468         } else {
4469                 vgpr_init_shader_ptr = vgpr_init_compute_shader;
4470                 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
4471                 vgpr_init_regs_ptr = vgpr_init_regs;
4472         }
4473
4474         total_size =
4475                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
4476         total_size +=
4477                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
4478         total_size +=
4479                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
4480         total_size = ALIGN(total_size, 256);
4481         vgpr_offset = total_size;
4482         total_size += ALIGN(vgpr_init_shader_size, 256);
4483         sgpr_offset = total_size;
4484         total_size += sizeof(sgpr_init_compute_shader);
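        /* a sketch of the dword budget reserved above: for each of the three
         * dispatches, gpr_reg_size * 3 covers one SET_SH_REG per register
         * (header + offset + value), + 4 for the PGM_LO/HI write (header +
         * offset + two address dwords), + 5 for DISPATCH_DIRECT (header +
         * x/y/z + initiator), + 2 for the EVENT_WRITE partial flush
         */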
4485
4486         /* allocate an indirect buffer to put the commands in */
4487         memset(&ib, 0, sizeof(ib));
4488         r = amdgpu_ib_get(adev, NULL, total_size, &ib);
4489         if (r) {
4490                 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
4491                 return r;
4492         }
4493
4494         /* load the compute shaders */
4495         for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
4496                 ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
4497
4498         for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
4499                 ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
4500
4501         /* init the ib length to 0 */
4502         ib.length_dw = 0;
4503
4504         /* VGPR */
4505         /* write the register state for the compute dispatch */
4506         for (i = 0; i < gpr_reg_size; i++) {
4507                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4508                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
4509                                                                 - PACKET3_SET_SH_REG_START;
4510                 ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
4511         }
4512         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4513         gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
4514         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4515         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4516                                                         - PACKET3_SET_SH_REG_START;
4517         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4518         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4519
4520         /* write dispatch packet */
4521         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4522         ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
4523         ib.ptr[ib.length_dw++] = 1; /* y */
4524         ib.ptr[ib.length_dw++] = 1; /* z */
4525         ib.ptr[ib.length_dw++] =
4526                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4527
4528         /* write CS partial flush packet */
4529         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4530         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4531
4532         /* SGPR1 */
4533         /* write the register state for the compute dispatch */
4534         for (i = 0; i < gpr_reg_size; i++) {
4535                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4536                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
4537                                                                 - PACKET3_SET_SH_REG_START;
4538                 ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
4539         }
4540         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4541         gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4542         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4543         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4544                                                         - PACKET3_SET_SH_REG_START;
4545         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4546         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4547
4548         /* write dispatch packet */
4549         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4550         ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4551         ib.ptr[ib.length_dw++] = 1; /* y */
4552         ib.ptr[ib.length_dw++] = 1; /* z */
4553         ib.ptr[ib.length_dw++] =
4554                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4555
4556         /* write CS partial flush packet */
4557         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4558         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4559
4560         /* SGPR2 */
4561         /* write the register state for the compute dispatch */
4562         for (i = 0; i < gpr_reg_size; i++) {
4563                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4564                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
4565                                                                 - PACKET3_SET_SH_REG_START;
4566                 ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
4567         }
4568         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4569         gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4570         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4571         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4572                                                         - PACKET3_SET_SH_REG_START;
4573         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4574         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4575
4576         /* write dispatch packet */
4577         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4578         ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4579         ib.ptr[ib.length_dw++] = 1; /* y */
4580         ib.ptr[ib.length_dw++] = 1; /* z */
4581         ib.ptr[ib.length_dw++] =
4582                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4583
4584         /* write CS partial flush packet */
4585         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4586         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4587
4588         /* schedule the ib on the ring */
4589         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
4590         if (r) {
4591                 DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
4592                 goto fail;
4593         }
4594
4595         /* wait for the GPU to finish processing the IB */
4596         r = dma_fence_wait(f, false);
4597         if (r) {
4598                 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
4599                 goto fail;
4600         }
4601
4602 fail:
4603         amdgpu_ib_free(adev, &ib, NULL);
4604         dma_fence_put(f);
4605
4606         return r;
4607 }
4608
4609 static int gfx_v9_0_early_init(void *handle)
4610 {
4611         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4612
4613         if (adev->asic_type == CHIP_ARCTURUS)
4614                 adev->gfx.num_gfx_rings = 0;
4615         else
4616                 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
4617         adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
4618         gfx_v9_0_set_kiq_pm4_funcs(adev);
4619         gfx_v9_0_set_ring_funcs(adev);
4620         gfx_v9_0_set_irq_funcs(adev);
4621         gfx_v9_0_set_gds_init(adev);
4622         gfx_v9_0_set_rlc_funcs(adev);
4623
4624         return 0;
4625 }
4626
4627 static int gfx_v9_0_ecc_late_init(void *handle)
4628 {
4629         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4630         int r;
4631
4632         /*
4633          * Temporary workaround: on several cards the CP firmware fails to
4634          * update the read pointer while CPDMA writes the clearing operation
4635          * to GDS during the suspend/resume sequence, so limit this
4636          * operation to the cold boot sequence.
4637          */
4638         if (!adev->in_suspend) {
4639                 r = gfx_v9_0_do_edc_gds_workarounds(adev);
4640                 if (r)
4641                         return r;
4642         }
4643
4644         /* requires IBs, so do this in late init after the IB pool is initialized */
4645         r = gfx_v9_0_do_edc_gpr_workarounds(adev);
4646         if (r)
4647                 return r;
4648
4649         if (adev->gfx.funcs &&
4650             adev->gfx.funcs->reset_ras_error_count)
4651                 adev->gfx.funcs->reset_ras_error_count(adev);
4652
4653         r = amdgpu_gfx_ras_late_init(adev);
4654         if (r)
4655                 return r;
4656
4657         return 0;
4658 }
4659
4660 static int gfx_v9_0_late_init(void *handle)
4661 {
4662         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4663         int r;
4664
4665         r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4666         if (r)
4667                 return r;
4668
4669         r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4670         if (r)
4671                 return r;
4672
4673         r = gfx_v9_0_ecc_late_init(handle);
4674         if (r)
4675                 return r;
4676
4677         return 0;
4678 }
4679
4680 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
4681 {
4682         uint32_t rlc_setting;
4683
4684         /* check whether the RLC is enabled; callers skip their work if it is not */
4685         rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
4686         if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
4687                 return false;
4688
4689         return true;
4690 }
4691
4692 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
4693 {
4694         uint32_t data;
4695         unsigned i;
4696
4697         data = RLC_SAFE_MODE__CMD_MASK;
4698         data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4699         WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4700
4701         /* wait for RLC_SAFE_MODE */
4702         for (i = 0; i < adev->usec_timeout; i++) {
4703                 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4704                         break;
4705                 udelay(1);
4706         }
4707 }
4708
4709 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
4710 {
4711         uint32_t data;
4712
4713         data = RLC_SAFE_MODE__CMD_MASK;
4714         WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4715 }
4716
4717 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
4718                                                 bool enable)
4719 {
4720         amdgpu_gfx_rlc_enter_safe_mode(adev);
4721
4722         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
4723                 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
4724                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4725                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
4726         } else {
4727                 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
4728                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4729                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
4730         }
4731
4732         amdgpu_gfx_rlc_exit_safe_mode(adev);
4733 }
4734
4735 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
4736                                                 bool enable)
4737 {
4738         /* TODO: double check if we need to perform under safe mode */
4739         /* gfx_v9_0_enter_rlc_safe_mode(adev); */
4740
4741         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
4742                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
4743         else
4744                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
4745
4746         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
4747                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
4748         else
4749                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
4750
4751         /* gfx_v9_0_exit_rlc_safe_mode(adev); */
4752 }
4753
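/*
 * Medium-grain clock gating, in the order coded below: on enable, the
 * MGCG/MGLS override bits are cleared in mmRLC_CGTT_MGCG_OVERRIDE and
 * RLC/CP memory light sleep is turned on; on disable, the overrides are
 * set again and both *_MEM_SLP_CNTL enable bits are cleared.  The whole
 * sequence runs inside RLC safe mode.
 */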
4754 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4755                                                       bool enable)
4756 {
4757         uint32_t data, def;
4758
4759         amdgpu_gfx_rlc_enter_safe_mode(adev);
4760
4761         /* It is disabled by HW by default */
4762         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4763                 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4764                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4765
4766                 if (adev->asic_type != CHIP_VEGA12)
4767                         data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4768
4769                 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4770                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4771                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4772
4773                 /* only for Vega10 & Raven1 */
4774                 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4775
4776                 if (def != data)
4777                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4778
4779                 /* MGLS is a global flag to control all MGLS in GFX */
4780                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4781                         /* 2 - RLC memory Light sleep */
4782                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4783                                 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4784                                 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4785                                 if (def != data)
4786                                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4787                         }
4788                         /* 3 - CP memory Light sleep */
4789                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4790                                 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4791                                 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4792                                 if (def != data)
4793                                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4794                         }
4795                 }
4796         } else {
4797                 /* 1 - MGCG_OVERRIDE */
4798                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4799
4800                 if (adev->asic_type != CHIP_VEGA12)
4801                         data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4802
4803                 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4804                          RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4805                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4806                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4807
4808                 if (def != data)
4809                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4810
4811                 /* 2 - disable MGLS in RLC */
4812                 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4813                 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4814                         data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4815                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4816                 }
4817
4818                 /* 3 - disable MGLS in CP */
4819                 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4820                 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4821                         data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4822                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4823                 }
4824         }
4825
4826         amdgpu_gfx_rlc_exit_safe_mode(adev);
4827 }
4828
4829 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
4830                                            bool enable)
4831 {
4832         uint32_t data, def;
4833
4834         if (adev->asic_type == CHIP_ARCTURUS)
4835                 return;
4836
4837         amdgpu_gfx_rlc_enter_safe_mode(adev);
4838
4839         /* Enable 3D CGCG/CGLS */
4840         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
4841                 /* write cmd to clear cgcg/cgls override */
4842                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4843                 /* unset CGCG override */
4844                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4845                 /* update CGCG and CGLS override bits */
4846                 if (def != data)
4847                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4848
4849                 /* enable 3D CGCG FSM (0x0000363f) */
4850                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4851
4852                 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4853                         RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4854                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4855                         data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4856                                 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4857                 if (def != data)
4858                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4859
4860                 /* set IDLE_POLL_COUNT(0x00900100) */
4861                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4862                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4863                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4864                 if (def != data)
4865                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4866         } else {
4867                 /* Disable CGCG/CGLS */
4868                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4869                 /* disable cgcg, cgls should be disabled */
4870                 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
4871                           RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
4872                 /* disable cgcg and cgls in FSM */
4873                 if (def != data)
4874                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4875         }
4876
4877         amdgpu_gfx_rlc_exit_safe_mode(adev);
4878 }
4879
4880 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4881                                                       bool enable)
4882 {
4883         uint32_t def, data;
4884
4885         amdgpu_gfx_rlc_enter_safe_mode(adev);
4886
4887         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
4888                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4889                 /* unset CGCG override */
4890                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4891                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4892                         data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4893                 else
4894                         data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4895                 /* update CGCG and CGLS override bits */
4896                 if (def != data)
4897                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4898
4899                 /* enable cgcg FSM (0x0000363F) */
4900                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4901
4902                 if (adev->asic_type == CHIP_ARCTURUS)
4903                         data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4904                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4905                 else
4906                         data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4907                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4908                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4909                         data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4910                                 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4911                 if (def != data)
4912                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4913
4914                 /* set IDLE_POLL_COUNT(0x00900100) */
4915                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4916                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4917                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4918                 if (def != data)
4919                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4920         } else {
4921                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4922                 /* reset CGCG/CGLS bits */
4923                 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4924                 /* disable cgcg and cgls in FSM */
4925                 if (def != data)
4926                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4927         }
4928
4929         amdgpu_gfx_rlc_exit_safe_mode(adev);
4930 }
4931
4932 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4933                                             bool enable)
4934 {
4935         if (enable) {
4936                 /* CGCG/CGLS should be enabled after MGCG/MGLS
4937                  * ===  MGCG + MGLS ===
4938                  */
4939                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
4940                 /* ===  CGCG/CGLS for GFX 3D Only === */
4941                 gfx_v9_0_update_3d_clock_gating(adev, enable);
4942                 /* ===  CGCG + CGLS === */
4943                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
4944         } else {
4945                 /* CGCG/CGLS should be disabled before MGCG/MGLS
4946                  * ===  CGCG + CGLS ===
4947                  */
4948                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
4949                 /* ===  CGCG/CGLS for GFX 3D Only === */
4950                 gfx_v9_0_update_3d_clock_gating(adev, enable);
4951                 /* ===  MGCG + MGLS === */
4952                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
4953         }
4954         return 0;
4955 }
4956
4957 static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
4958 {
4959         u32 data;
4960
4961         data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
4962
4963         data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
4964         data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
4965
4966         WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
4967 }
4968
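/*
 * Resolve each whitelist entry to an absolute register offset
 * (reg_offset[hwip][instance][segment] + reg) and report whether
 * @offset matches one of them.  Offsets matched by rlcg_access_gc_9_0
 * are routed through the RLCG write path (gfx_v9_0_rlcg_wreg) instead
 * of a direct MMIO write; this typically matters when running as an
 * SR-IOV virtual function.
 */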
4969 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
4970                                         uint32_t offset,
4971                                         struct soc15_reg_rlcg *entries, int arr_size)
4972 {
4973         int i;
4974         uint32_t reg;
4975
4976         if (!entries)
4977                 return false;
4978
4979         for (i = 0; i < arr_size; i++) {
4980                 const struct soc15_reg_rlcg *entry;
4981
4982                 entry = &entries[i];
4983                 reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
4984                 if (offset == reg)
4985                         return true;
4986         }
4987
4988         return false;
4989 }
4990
4991 static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
4992 {
4993         return gfx_v9_0_check_rlcg_range(adev, offset,
4994                                         (void *)rlcg_access_gc_9_0,
4995                                         ARRAY_SIZE(rlcg_access_gc_9_0));
4996 }
4997
4998 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
4999         .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
5000         .set_safe_mode = gfx_v9_0_set_safe_mode,
5001         .unset_safe_mode = gfx_v9_0_unset_safe_mode,
5002         .init = gfx_v9_0_rlc_init,
5003         .get_csb_size = gfx_v9_0_get_csb_size,
5004         .get_csb_buffer = gfx_v9_0_get_csb_buffer,
5005         .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
5006         .resume = gfx_v9_0_rlc_resume,
5007         .stop = gfx_v9_0_rlc_stop,
5008         .reset = gfx_v9_0_rlc_reset,
5009         .start = gfx_v9_0_rlc_start,
5010         .update_spm_vmid = gfx_v9_0_update_spm_vmid,
5011         .rlcg_wreg = gfx_v9_0_rlcg_wreg,
5012         .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
5013 };
5014
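/*
 * Power-gating entry point.  Note the GFXOFF ordering below: when
 * leaving the gated state, GFXOFF is disallowed first and the delayed
 * work that would re-arm it is cancelled; GFXOFF is only allowed again
 * once the new gating configuration has been programmed.
 */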
5015 static int gfx_v9_0_set_powergating_state(void *handle,
5016                                           enum amd_powergating_state state)
5017 {
5018         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5019         bool enable = (state == AMD_PG_STATE_GATE);
5020
5021         switch (adev->asic_type) {
5022         case CHIP_RAVEN:
5023         case CHIP_RENOIR:
5024                 if (!enable) {
5025                         amdgpu_gfx_off_ctrl(adev, false);
5026                         cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
5027                 }
5028                 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5029                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
5030                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
5031                 } else {
5032                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
5033                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
5034                 }
5035
5036                 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5037                         gfx_v9_0_enable_cp_power_gating(adev, true);
5038                 else
5039                         gfx_v9_0_enable_cp_power_gating(adev, false);
5040
5041                 /* update gfx cgpg state */
5042                 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
5043
5044                 /* update mgcg state */
5045                 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
5046
5047                 if (enable)
5048                         amdgpu_gfx_off_ctrl(adev, true);
5049                 break;
5050         case CHIP_VEGA12:
5051                 if (!enable) {
5052                         amdgpu_gfx_off_ctrl(adev, false);
5053                         cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
5054                 } else {
5055                         amdgpu_gfx_off_ctrl(adev, true);
5056                 }
5057                 break;
5058         default:
5059                 break;
5060         }
5061
5062         return 0;
5063 }
5064
5065 static int gfx_v9_0_set_clockgating_state(void *handle,
5066                                           enum amd_clockgating_state state)
5067 {
5068         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5069
5070         if (amdgpu_sriov_vf(adev))
5071                 return 0;
5072
5073         switch (adev->asic_type) {
5074         case CHIP_VEGA10:
5075         case CHIP_VEGA12:
5076         case CHIP_VEGA20:
5077         case CHIP_RAVEN:
5078         case CHIP_ARCTURUS:
5079         case CHIP_RENOIR:
5080                 gfx_v9_0_update_gfx_clock_gating(adev,
5081                                                  state == AMD_CG_STATE_GATE);
5082                 break;
5083         default:
5084                 break;
5085         }
5086         return 0;
5087 }
5088
5089 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
5090 {
5091         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5092         int data;
5093
5094         if (amdgpu_sriov_vf(adev))
5095                 *flags = 0;
5096
5097         /* AMD_CG_SUPPORT_GFX_MGCG */
5098         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
5099         if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5100                 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
5101
5102         /* AMD_CG_SUPPORT_GFX_CGCG */
5103         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
5104         if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5105                 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
5106
5107         /* AMD_CG_SUPPORT_GFX_CGLS */
5108         if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5109                 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
5110
5111         /* AMD_CG_SUPPORT_GFX_RLC_LS */
5112         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
5113         if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5114                 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5115
5116         /* AMD_CG_SUPPORT_GFX_CP_LS */
5117         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
5118         if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5119                 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5120
5121         if (adev->asic_type != CHIP_ARCTURUS) {
5122                 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
5123                 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
5124                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5125                         *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5126
5127                 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
5128                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5129                         *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5130         }
5131 }
5132
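/*
 * Ring-pointer plumbing for the gfx ring: the 32-bit rptr is always
 * read from a GPU writeback slot (adev->wb.wb); the 64-bit wptr lives
 * in a writeback slot when doorbells are in use and otherwise falls
 * back to the CP_RB0_WPTR/CP_RB0_WPTR_HI register pair.
 */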
5133 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5134 {
5135         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
5136 }
5137
5138 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5139 {
5140         struct amdgpu_device *adev = ring->adev;
5141         u64 wptr;
5142
5143         /* XXX check if swapping is necessary on BE */
5144         if (ring->use_doorbell) {
5145                 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
5146         } else {
5147                 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
5148                 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
5149         }
5150
5151         return wptr;
5152 }
5153
5154 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5155 {
5156         struct amdgpu_device *adev = ring->adev;
5157
5158         if (ring->use_doorbell) {
5159                 /* XXX check if swapping is necessary on BE */
5160                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
5161                 WDOORBELL64(ring->doorbell_index, ring->wptr);
5162         } else {
5163                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
5164                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
5165         }
5166 }
5167
5168 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5169 {
5170         struct amdgpu_device *adev = ring->adev;
5171         u32 ref_and_mask, reg_mem_engine;
5172         const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
5173
5174         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
5175                 switch (ring->me) {
5176                 case 1:
5177                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
5178                         break;
5179                 case 2:
5180                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
5181                         break;
5182                 default:
5183                         return;
5184                 }
5185                 reg_mem_engine = 0;
5186         } else {
5187                 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
5188                 reg_mem_engine = 1; /* pfp */
5189         }
5190
5191         gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5192                               adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5193                               adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5194                               ref_and_mask, ref_and_mask, 0x20);
5195 }
5196
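/*
 * IB packet layout emitted below: a header DW (INDIRECT_BUFFER, or
 * INDIRECT_BUFFER_CONST for CE IBs), the 64-bit GPU address split into
 * low/high DWs (the low DW also carries a swap field on big-endian),
 * then a control DW holding the IB length in DWs with the VMID in
 * bits 31:24.
 */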
5197 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5198                                         struct amdgpu_job *job,
5199                                         struct amdgpu_ib *ib,
5200                                         uint32_t flags)
5201 {
5202         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5203         u32 header, control = 0;
5204
5205         if (ib->flags & AMDGPU_IB_FLAG_CE)
5206                 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
5207         else
5208                 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5209
5210         control |= ib->length_dw | (vmid << 24);
5211
5212         if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5213                 control |= INDIRECT_BUFFER_PRE_ENB(1);
5214
5215                 if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
5216                         gfx_v9_0_ring_emit_de_meta(ring);
5217         }
5218
5219         amdgpu_ring_write(ring, header);
5220         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5221         amdgpu_ring_write(ring,
5222 #ifdef __BIG_ENDIAN
5223                 (2 << 0) |
5224 #endif
5225                 lower_32_bits(ib->gpu_addr));
5226         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5227         amdgpu_ring_write(ring, control);
5228 }
5229
5230 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5231                                           struct amdgpu_job *job,
5232                                           struct amdgpu_ib *ib,
5233                                           uint32_t flags)
5234 {
5235         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5236         u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5237
5238         /* Currently there is a high likelihood of a wave ID mismatch
5239          * between ME and GDS, leading to a HW deadlock, because ME generates
5240          * different wave IDs than the GDS expects. This situation happens
5241          * randomly when at least 5 compute pipes use GDS ordered append.
5242          * The wave IDs generated by ME are also wrong after suspend/resume.
5243          * Those are probably bugs somewhere else in the kernel driver.
5244          *
5245          * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5246          * GDS to 0 for this ring (me/pipe).
5247          */
5248         if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5249                 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5250                 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
5251                 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5252         }
5253
5254         amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5255         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5256         amdgpu_ring_write(ring,
5257 #ifdef __BIG_ENDIAN
5258                                 (2 << 0) |
5259 #endif
5260                                 lower_32_bits(ib->gpu_addr));
5261         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5262         amdgpu_ring_write(ring, control);
5263 }
5264
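/*
 * Fence emission uses a RELEASE_MEM packet.  As used here, DATA_SEL(2)
 * sends the full 64-bit seq while DATA_SEL(1) sends only its low 32
 * bits, and INT_SEL(2) raises an interrupt once the write is confirmed
 * (see the DATA_SEL/INT_SEL definitions in soc15d.h).
 */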
5265 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5266                                      u64 seq, unsigned flags)
5267 {
5268         bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5269         bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5270         bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
5271
5272         /* RELEASE_MEM - flush caches, send int */
5273         amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5274         amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
5275                                                EOP_TC_NC_ACTION_EN) :
5276                                               (EOP_TCL1_ACTION_EN |
5277                                                EOP_TC_ACTION_EN |
5278                                                EOP_TC_WB_ACTION_EN |
5279                                                EOP_TC_MD_ACTION_EN)) |
5280                                  EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5281                                  EVENT_INDEX(5)));
5282         amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
5283
5284         /*
5285          * The address must be Qword-aligned for a 64-bit write, and
5286          * Dword-aligned if only the low 32 bits are sent (data high is discarded).
5287          */
5288         if (write64bit)
5289                 BUG_ON(addr & 0x7);
5290         else
5291                 BUG_ON(addr & 0x3);
5292         amdgpu_ring_write(ring, lower_32_bits(addr));
5293         amdgpu_ring_write(ring, upper_32_bits(addr));
5294         amdgpu_ring_write(ring, lower_32_bits(seq));
5295         amdgpu_ring_write(ring, upper_32_bits(seq));
5296         amdgpu_ring_write(ring, 0);
5297 }
5298
5299 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5300 {
5301         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5302         uint32_t seq = ring->fence_drv.sync_seq;
5303         uint64_t addr = ring->fence_drv.gpu_addr;
5304
5305         gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
5306                               lower_32_bits(addr), upper_32_bits(addr),
5307                               seq, 0xffffffff, 4);
5308 }
5309
5310 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5311                                         unsigned vmid, uint64_t pd_addr)
5312 {
5313         amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5314
5315         /* compute doesn't have PFP */
5316         if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5317                 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5318                 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5319                 amdgpu_ring_write(ring, 0x0);
5320         }
5321 }
5322
5323 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5324 {
5325         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
5326 }
5327
5328 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5329 {
5330         u64 wptr;
5331
5332         /* XXX check if swapping is necessary on BE */
5333         if (ring->use_doorbell)
5334                 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
5335         else
5336                 BUG();
5337         return wptr;
5338 }
5339
5340 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5341 {
5342         struct amdgpu_device *adev = ring->adev;
5343
5344         /* XXX check if swapping is necessary on BE */
5345         if (ring->use_doorbell) {
5346                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
5347                 WDOORBELL64(ring->doorbell_index, ring->wptr);
5348         } else {
5349                 BUG(); /* only DOORBELL method supported on gfx9 now */
5350         }
5351 }
5352
5353 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5354                                          u64 seq, unsigned int flags)
5355 {
5356         struct amdgpu_device *adev = ring->adev;
5357
5358         /* we only allocate 32bit for each seq wb address */
5359         BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5360
5361         /* write fence seq to the "addr" */
5362         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5363         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5364                                  WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5365         amdgpu_ring_write(ring, lower_32_bits(addr));
5366         amdgpu_ring_write(ring, upper_32_bits(addr));
5367         amdgpu_ring_write(ring, lower_32_bits(seq));
5368
5369         if (flags & AMDGPU_FENCE_FLAG_INT) {
5370                 /* set register to trigger INT */
5371                 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5372                 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5373                                          WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5374                 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
5375                 amdgpu_ring_write(ring, 0);
5376                 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5377         }
5378 }
5379
5380 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
5381 {
5382         amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
5383         amdgpu_ring_write(ring, 0);
5384 }
5385
5386 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
5387 {
5388         struct v9_ce_ib_state ce_payload = {0};
5389         uint64_t csa_addr;
5390         int cnt;
5391
5392         cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
5393         csa_addr = amdgpu_csa_vaddr(ring->adev);
5394
5395         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5396         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
5397                                  WRITE_DATA_DST_SEL(8) |
5398                                  WR_CONFIRM) |
5399                                  WRITE_DATA_CACHE_POLICY(0));
5400         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5401         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5402         amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
5403 }
5404
5405 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
5406 {
5407         struct v9_de_ib_state de_payload = {0};
5408         uint64_t csa_addr, gds_addr;
5409         int cnt;
5410
5411         csa_addr = amdgpu_csa_vaddr(ring->adev);
5412         gds_addr = csa_addr + 4096;
5413         de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5414         de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5415
5416         cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5417         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5418         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5419                                  WRITE_DATA_DST_SEL(8) |
5420                                  WR_CONFIRM) |
5421                                  WRITE_DATA_CACHE_POLICY(0));
5422         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5423         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5424         amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
5425 }
5426
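/*
 * FRAME_CONTROL marks frame boundaries for the CP: FRAME_CMD(0) on
 * start begins a frame, FRAME_CMD(1) ends one.  The "tmz" name comes
 * from its role in protected (TMZ, Trusted Memory Zone) frame
 * handling; here it simply brackets each frame submission.
 */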
5427 static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
5428 {
5429         amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5430         amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
5431 }
5432
5433 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
5434 {
5435         uint32_t dw2 = 0;
5436
5437         if (amdgpu_sriov_vf(ring->adev))
5438                 gfx_v9_0_ring_emit_ce_meta(ring);
5439
5440         gfx_v9_0_ring_emit_tmz(ring, true);
5441
5442         dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
5443         if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5444                 /* set load_global_config & load_global_uconfig */
5445                 dw2 |= 0x8001;
5446                 /* set load_cs_sh_regs */
5447                 dw2 |= 0x01000000;
5448                 /* set load_per_context_state & load_gfx_sh_regs for GFX */
5449                 dw2 |= 0x10002;
5450
5451                 /* set load_ce_ram if a preamble is presented */
5452                 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
5453                         dw2 |= 0x10000000;
5454         } else {
5455                 /* still load_ce_ram if this is the first time the preamble is
5456                  * presented, even though no context switch happens.
5457                  */
5458                 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
5459                         dw2 |= 0x10000000;
5460         }
5461
5462         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5463         amdgpu_ring_write(ring, dw2);
5464         amdgpu_ring_write(ring, 0);
5465 }
5466
5467 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
5468 {
5469         unsigned ret;
5470         amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5471         amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
5472         amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
5473         amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exe_gpu_addr == 0 */
5474         ret = ring->wptr & ring->buf_mask;
5475         amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
5476         return ret;
5477 }
5478
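/*
 * Patch the placeholder left by init_cond_exec above: the dummy
 * 0x55aa55aa DW at @offset is overwritten with the number of DWs the
 * CP must skip when *cond_exe_gpu_addr reads back as zero; the else
 * branch accounts for ring-buffer wrap-around.
 */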
5479 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
5480 {
5481         unsigned cur;
5482         BUG_ON(offset > ring->buf_mask);
5483         BUG_ON(ring->ring[offset] != 0x55aa55aa);
5484
5485         cur = (ring->wptr & ring->buf_mask) - 1;
5486         if (likely(cur > offset))
5487                 ring->ring[offset] = cur - offset;
5488         else
5489                 ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
5490 }
5491
5492 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
5493 {
5494         struct amdgpu_device *adev = ring->adev;
5495         struct amdgpu_kiq *kiq = &adev->gfx.kiq;
5496
5497         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
5498         amdgpu_ring_write(ring, 0 |     /* src: register */
5499                                 (5 << 8) |      /* dst: memory */
5500                                 (1 << 20));     /* write confirm */
5501         amdgpu_ring_write(ring, reg);
5502         amdgpu_ring_write(ring, 0);
5503         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
5504                                 kiq->reg_val_offs * 4));
5505         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
5506                                 kiq->reg_val_offs * 4));
5507 }
5508
5509 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
5510                                     uint32_t val)
5511 {
5512         uint32_t cmd = 0;
5513
5514         switch (ring->funcs->type) {
5515         case AMDGPU_RING_TYPE_GFX:
5516                 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
5517                 break;
5518         case AMDGPU_RING_TYPE_KIQ:
5519                 cmd = (1 << 16); /* no inc addr */
5520                 break;
5521         default:
5522                 cmd = WR_CONFIRM;
5523                 break;
5524         }
5525         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5526         amdgpu_ring_write(ring, cmd);
5527         amdgpu_ring_write(ring, reg);
5528         amdgpu_ring_write(ring, 0);
5529         amdgpu_ring_write(ring, val);
5530 }
5531
5532 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
5533                                         uint32_t val, uint32_t mask)
5534 {
5535         gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
5536 }
5537
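/*
 * Prefer the fused write+wait WAIT_REG_MEM form only when the ME/MEC
 * firmware advertises support (me_fw_write_wait / mec_fw_write_wait);
 * otherwise fall back to the generic helper that emits the register
 * write and the wait as separate packets.
 */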
5538 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
5539                                                   uint32_t reg0, uint32_t reg1,
5540                                                   uint32_t ref, uint32_t mask)
5541 {
5542         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5543         struct amdgpu_device *adev = ring->adev;
5544         bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
5545                 adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
5546
5547         if (fw_version_ok)
5548                 gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
5549                                       ref, mask, 0x20);
5550         else
5551                 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
5552                                                            ref, mask);
5553 }
5554
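/*
 * Soft recovery: write an SQ_CMD targeted at @vmid (CHECK_VMID = 1).
 * CMD = 0x03 appears to correspond to the SQ "kill" indirect command,
 * so this should terminate the hung waves of that VMID instead of
 * requiring a full GPU reset.
 */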
5555 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
5556 {
5557         struct amdgpu_device *adev = ring->adev;
5558         uint32_t value = 0;
5559
5560         value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
5561         value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
5562         value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
5563         value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
5564         WREG32_SOC15(GC, 0, mmSQ_CMD, value);
5565 }
5566
5567 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
5568                                                  enum amdgpu_interrupt_state state)
5569 {
5570         switch (state) {
5571         case AMDGPU_IRQ_STATE_DISABLE:
5572         case AMDGPU_IRQ_STATE_ENABLE:
5573                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5574                                TIME_STAMP_INT_ENABLE,
5575                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5576                 break;
5577         default:
5578                 break;
5579         }
5580 }
5581
5582 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
5583                                                      int me, int pipe,
5584                                                      enum amdgpu_interrupt_state state)
5585 {
5586         u32 mec_int_cntl, mec_int_cntl_reg;
5587
5588         /*
5589          * amdgpu controls only the first MEC. That's why this function only
5590          * handles the setting of interrupts for this specific MEC. All other
5591          * pipes' interrupts are set by amdkfd.
5592          */
5593
5594         if (me == 1) {
5595                 switch (pipe) {
5596                 case 0:
5597                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5598                         break;
5599                 case 1:
5600                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
5601                         break;
5602                 case 2:
5603                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
5604                         break;
5605                 case 3:
5606                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
5607                         break;
5608                 default:
5609                         DRM_DEBUG("invalid pipe %d\n", pipe);
5610                         return;
5611                 }
5612         } else {
5613                 DRM_DEBUG("invalid me %d\n", me);
5614                 return;
5615         }
5616
5617         switch (state) {
5618         case AMDGPU_IRQ_STATE_DISABLE:
5619                 mec_int_cntl = RREG32(mec_int_cntl_reg);
5620                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5621                                              TIME_STAMP_INT_ENABLE, 0);
5622                 WREG32(mec_int_cntl_reg, mec_int_cntl);
5623                 break;
5624         case AMDGPU_IRQ_STATE_ENABLE:
5625                 mec_int_cntl = RREG32(mec_int_cntl_reg);
5626                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5627                                              TIME_STAMP_INT_ENABLE, 1);
5628                 WREG32(mec_int_cntl_reg, mec_int_cntl);
5629                 break;
5630         default:
5631                 break;
5632         }
5633 }
5634
5635 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
5636                                              struct amdgpu_irq_src *source,
5637                                              unsigned type,
5638                                              enum amdgpu_interrupt_state state)
5639 {
5640         switch (state) {
5641         case AMDGPU_IRQ_STATE_DISABLE:
5642         case AMDGPU_IRQ_STATE_ENABLE:
5643                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5644                                PRIV_REG_INT_ENABLE,
5645                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5646                 break;
5647         default:
5648                 break;
5649         }
5650
5651         return 0;
5652 }
5653
5654 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
5655                                               struct amdgpu_irq_src *source,
5656                                               unsigned type,
5657                                               enum amdgpu_interrupt_state state)
5658 {
5659         switch (state) {
5660         case AMDGPU_IRQ_STATE_DISABLE:
5661         case AMDGPU_IRQ_STATE_ENABLE:
5662                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5663                                PRIV_INSTR_INT_ENABLE,
5664                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
                break;
5665         default:
5666                 break;
5667         }
5668
5669         return 0;
5670 }
5671
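/*
 * Per-pipe toggles for the CP_ECC_ERROR_INT_ENABLE field.  Token
 * pasting builds the register name, e.g.:
 *
 *   ENABLE_ECC_ON_ME_PIPE(1, 0)
 *     -> WREG32_FIELD15(GC, 0, CP_ME1_PIPE0_INT_CNTL,
 *                       CP_ECC_ERROR_INT_ENABLE, 1)
 */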
5672 #define ENABLE_ECC_ON_ME_PIPE(me, pipe)                         \
5673         WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5674                         CP_ECC_ERROR_INT_ENABLE, 1)
5675
5676 #define DISABLE_ECC_ON_ME_PIPE(me, pipe)                        \
5677         WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5678                         CP_ECC_ERROR_INT_ENABLE, 0)
5679
5680 static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
5681                                               struct amdgpu_irq_src *source,
5682                                               unsigned type,
5683                                               enum amdgpu_interrupt_state state)
5684 {
5685         switch (state) {
5686         case AMDGPU_IRQ_STATE_DISABLE:
5687                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5688                                 CP_ECC_ERROR_INT_ENABLE, 0);
5689                 DISABLE_ECC_ON_ME_PIPE(1, 0);
5690                 DISABLE_ECC_ON_ME_PIPE(1, 1);
5691                 DISABLE_ECC_ON_ME_PIPE(1, 2);
5692                 DISABLE_ECC_ON_ME_PIPE(1, 3);
5693                 break;
5694
5695         case AMDGPU_IRQ_STATE_ENABLE:
5696                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5697                                 CP_ECC_ERROR_INT_ENABLE, 1);
5698                 ENABLE_ECC_ON_ME_PIPE(1, 0);
5699                 ENABLE_ECC_ON_ME_PIPE(1, 1);
5700                 ENABLE_ECC_ON_ME_PIPE(1, 2);
5701                 ENABLE_ECC_ON_ME_PIPE(1, 3);
5702                 break;
5703         default:
5704                 break;
5705         }
5706
5707         return 0;
5708 }
5709
5710
5711 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
5712                                             struct amdgpu_irq_src *src,
5713                                             unsigned type,
5714                                             enum amdgpu_interrupt_state state)
5715 {
5716         switch (type) {
5717         case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
5718                 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
5719                 break;
5720         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
5721                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
5722                 break;
5723         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
5724                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
5725                 break;
5726         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
5727                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
5728                 break;
5729         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
5730                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
5731                 break;
5732         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
5733                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
5734                 break;
5735         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
5736                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
5737                 break;
5738         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
5739                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
5740                 break;
5741         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
5742                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
5743                 break;
5744         default:
5745                 break;
5746         }
5747         return 0;
5748 }
5749
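/*
 * Layout of entry->ring_id as decoded by the handlers below:
 *
 *   bits [1:0] pipe, bits [3:2] me, bits [6:4] queue
 *
 * me 0 is the gfx ring; me 1 and 2 are the MECs, whose EOP events are
 * matched against the compute rings by (me, pipe, queue).
 */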
5750 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
5751                             struct amdgpu_irq_src *source,
5752                             struct amdgpu_iv_entry *entry)
5753 {
5754         int i;
5755         u8 me_id, pipe_id, queue_id;
5756         struct amdgpu_ring *ring;
5757
5758         DRM_DEBUG("IH: CP EOP\n");
5759         me_id = (entry->ring_id & 0x0c) >> 2;
5760         pipe_id = (entry->ring_id & 0x03) >> 0;
5761         queue_id = (entry->ring_id & 0x70) >> 4;
5762
5763         switch (me_id) {
5764         case 0:
5765                 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
5766                 break;
5767         case 1:
5768         case 2:
5769                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5770                         ring = &adev->gfx.compute_ring[i];
5771                         /* Per-queue interrupts are supported for MEC starting from VI.
5772                          * The interrupt can only be enabled/disabled per pipe, not per queue.
5773                          */
5774                         if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
5775                                 amdgpu_fence_process(ring);
5776                 }
5777                 break;
5778         }
5779         return 0;
5780 }
5781
5782 static void gfx_v9_0_fault(struct amdgpu_device *adev,
5783                            struct amdgpu_iv_entry *entry)
5784 {
5785         u8 me_id, pipe_id, queue_id;
5786         struct amdgpu_ring *ring;
5787         int i;
5788
5789         me_id = (entry->ring_id & 0x0c) >> 2;
5790         pipe_id = (entry->ring_id & 0x03) >> 0;
5791         queue_id = (entry->ring_id & 0x70) >> 4;
5792
5793         switch (me_id) {
5794         case 0:
5795                 drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
5796                 break;
5797         case 1:
5798         case 2:
5799                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5800                         ring = &adev->gfx.compute_ring[i];
5801                         if (ring->me == me_id && ring->pipe == pipe_id &&
5802                             ring->queue == queue_id)
5803                                 drm_sched_fault(&ring->sched);
5804                 }
5805                 break;
5806         }
5807 }
5808
5809 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
5810                                  struct amdgpu_irq_src *source,
5811                                  struct amdgpu_iv_entry *entry)
5812 {
5813         DRM_ERROR("Illegal register access in command stream\n");
5814         gfx_v9_0_fault(adev, entry);
5815         return 0;
5816 }
5817
5818 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
5819                                   struct amdgpu_irq_src *source,
5820                                   struct amdgpu_iv_entry *entry)
5821 {
5822         DRM_ERROR("Illegal instruction in command stream\n");
5823         gfx_v9_0_fault(adev, entry);
5824         return 0;
5825 }
5826
5827
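/*
 * RAS/EDC counter map: each entry names a memory instance, the EDC
 * count register that tracks it, and the register fields holding its
 * SEC (single-error corrected) and DED (double-error detected) counts.
 * Entries whose second field pair is 0, 0 expose only a SED
 * (single-error detected) count.
 */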
5828 static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
5829         { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
5830           SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
5831           SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
5832         },
5833         { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
5834           SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
5835           SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
5836         },
5837         { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5838           SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
5839           0, 0
5840         },
5841         { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5842           SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
5843           0, 0
5844         },
5845         { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
5846           SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
5847           SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
5848         },
5849         { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5850           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
5851           0, 0
5852         },
5853         { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5854           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
5855           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
5856         },
5857         { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
5858           SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
5859           SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
5860         },
5861         { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
5862           SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
5863           0, 0
5864         },
5865         { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
5866           SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
5867           0, 0
5868         },
5869         { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
5870           SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
5871           0, 0
5872         },
5873         { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5874           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
5875           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
5876         },
5877         { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5878           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
5879           0, 0
5880         },
5881         { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5882           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
5883           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
5884         },
5885         { "GDS_OA_PHY_PHY_CMD_RAM_MEM",
5886           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5887           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
5888           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
5889         },
5890         { "GDS_OA_PHY_PHY_DATA_RAM_MEM",
5891           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5892           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
5893           0, 0
5894         },
5895         { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
5896           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5897           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
5898           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
5899         },
5900         { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
5901           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5902           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
5903           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
5904         },
5905         { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
5906           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5907           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
5908           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
5909         },
5910         { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
5911           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5912           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
5913           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
5914         },
5915         { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
5916           SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
5917           0, 0
5918         },
5919         { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5920           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
5921           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
5922         },
5923         { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5924           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
5925           0, 0
5926         },
5927         { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5928           SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
5929           0, 0
5930         },
5931         { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5932           SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
5933           0, 0
5934         },
5935         { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5936           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
5937           0, 0
5938         },
5939         { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
5940           SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
5941           0, 0
5942         },
5943         { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
5944           SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
5945           0, 0
5946         },
5947         { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5948           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
5949           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
5950         },
5951         { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5952           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
5953           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
5954         },
5955         { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5956           SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
5957           SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
5958         },
5959         { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5960           SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
5961           SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
5962         },
5963         { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5964           SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
5965           SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
5966         },
5967         { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5968           SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
5969           0, 0
5970         },
5971         { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5972           SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
5973           0, 0
5974         },
5975         { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5976           SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
5977           0, 0
5978         },
5979         { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5980           SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
5981           0, 0
5982         },
5983         { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5984           SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
5985           0, 0
5986         },
5987         { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5988           SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
5989           0, 0
5990         },
5991         { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
5992           SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
5993           0, 0
5994         },
5995         { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
5996           SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
5997           0, 0
5998         },
5999         { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6000           SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
6001           0, 0
6002         },
6003         { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6004           SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
6005           0, 0
6006         },
6007         { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6008           SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
6009           0, 0
6010         },
6011         { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6012           SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
6013           0, 0
6014         },
6015         { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6016           SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
6017           0, 0
6018         },
6019         { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
6020           SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
6021           0, 0
6022         },
6023         { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6024           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
6025           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
6026         },
6027         { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6028           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
6029           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
6030         },
6031         { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6032           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
6033           0, 0
6034         },
6035         { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6036           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
6037           0, 0
6038         },
6039         { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6040           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
6041           0, 0
6042         },
6043         { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6044           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
6045           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
6046         },
6047         { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6048           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
6049           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
6050         },
6051         { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6052           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
6053           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
6054         },
6055         { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6056           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
6057           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
6058         },
6059         { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6060           SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
6061           0, 0
6062         },
6063         { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6064           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
6065           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
6066         },
6067         { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6068           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
6069           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
6070         },
6071         { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6072           SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
6073           SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
6074         },
6075         { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6076           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
6077           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
6078         },
6079         { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6080           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
6081           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
6082         },
6083         { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6084           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
6085           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
6086         },
6087         { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6088           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
6089           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
6090         },
6091         { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6092           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
6093           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
6094         },
6095         { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6096           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
6097           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
6098         },
6099         { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6100           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
6101           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
6102         },
6103         { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6104           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
6105           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
6106         },
6107         { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6108           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
6109           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
6110         },
6111         { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6112           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
6113           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
6114         },
6115         { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6116           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
6117           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
6118         },
6119         { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6120           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
6121           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
6122         },
6123         { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6124           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
6125           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
6126         },
6127         { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6128           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
6129           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
6130         },
6131         { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6132           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
6133           0, 0
6134         },
6135         { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6136           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
6137           0, 0
6138         },
6139         { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6140           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
6141           0, 0
6142         },
6143         { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6144           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
6145           0, 0
6146         },
6147         { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6148           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
6149           0, 0
6150         },
6151         { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6152           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
6153           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
6154         },
6155         { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6156           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
6157           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
6158         },
6159         { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6160           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
6161           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
6162         },
6163         { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6164           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
6165           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
6166         },
6167         { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6168           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
6169           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
6170         },
6171         { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6172           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
6173           0, 0
6174         },
6175         { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6176           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
6177           0, 0
6178         },
6179         { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6180           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
6181           0, 0
6182         },
6183         { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6184           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
6185           0, 0
6186         },
6187         { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6188           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
6189           0, 0
6190         },
6191         { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6192           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
6193           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
6194         },
6195         { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6196           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
6197           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
6198         },
6199         { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6200           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
6201           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
6202         },
6203         { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6204           SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
6205           SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
6206         },
6207         { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6208           SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
6209           SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
6210         },
6211         { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6212           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
6213           0, 0
6214         },
6215         { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6216           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
6217           0, 0
6218         },
6219         { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6220           SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
6221           0, 0
6222         },
6223         { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6224           SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
6225           0, 0
6226         },
6227         { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6228           SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
6229           0, 0
6230         },
6231         { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6232           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
6233           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
6234         },
6235         { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6236           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
6237           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
6238         },
6239         { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6240           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
6241           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
6242         },
6243         { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6244           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
6245           0, 0
6246         },
6247         { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6248           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
6249           0, 0
6250         },
6251         { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6252           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
6253           0, 0
6254         },
6255         { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6256           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
6257           0, 0
6258         },
6259         { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6260           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
6261           0, 0
6262         },
6263         { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6264           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
6265           0, 0
6266         }
6267 };
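/*
 * Each entry above pairs a sub-block name with the register that holds
 * its EDC counters and the SEC/DED field masks within that register.
 * Memories with only single-error-detect (SED) coverage report through
 * the SEC slot and leave the DED mask/shift pair as 0, 0.
 */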
6268
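/*
 * Inject a RAS error into a GFX sub-block through the PSP RAS TA.
 * The request is validated against ras_gfx_subblocks first: the
 * sub-block index must name a known entry, and the requested error
 * type must be supported by both the hardware and the driver for
 * that sub-block, otherwise the injection is refused with -EPERM.
 */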
6269 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
6270                                      void *inject_if)
6271 {
6272         struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
6273         int ret;
6274         struct ta_ras_trigger_error_input block_info = { 0 };
6275
6276         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6277                 return -EINVAL;
6278
6279         if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
6280                 return -EINVAL;
6281
6282         if (!ras_gfx_subblocks[info->head.sub_block_index].name)
6283                 return -EPERM;
6284
6285         if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
6286               info->head.type)) {
6287                 DRM_ERROR("GFX Subblock %s, hardware does not support type 0x%x\n",
6288                         ras_gfx_subblocks[info->head.sub_block_index].name,
6289                         info->head.type);
6290                 return -EPERM;
6291         }
6292
6293         if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
6294               info->head.type)) {
6295                 DRM_ERROR("GFX Subblock %s, driver does not support type 0x%x\n",
6296                         ras_gfx_subblocks[info->head.sub_block_index].name,
6297                         info->head.type);
6298                 return -EPERM;
6299         }
6300
6301         block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
6302         block_info.sub_block_index =
6303                 ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
6304         block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
6305         block_info.address = info->address;
6306         block_info.value = info->value;
6307
6308         mutex_lock(&adev->grbm_idx_mutex);
6309         ret = psp_ras_trigger_error(&adev->psp, &block_info);
6310         mutex_unlock(&adev->grbm_idx_mutex);
6311
6312         return ret;
6313 }
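/*
 * A minimal caller sketch (illustrative only; real callers reach the
 * function above through the RAS framework, and sub_block_index 0 is
 * just a placeholder for a valid ras_gfx_subblocks entry):
 *
 *	struct ras_inject_if info = {
 *		.head = {
 *			.block = AMDGPU_RAS_BLOCK__GFX,
 *			.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 *			.sub_block_index = 0,
 *		},
 *		.address = 0,
 *		.value = 0,
 *	};
 *	int r = gfx_v9_0_ras_error_inject(adev, &info);
 */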
6314
6315 static const char *vml2_mems[] = {
6316         "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
6317         "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
6318         "UTC_VML2_BANK_CACHE_0_4K_MEM0",
6319         "UTC_VML2_BANK_CACHE_0_4K_MEM1",
6320         "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
6321         "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
6322         "UTC_VML2_BANK_CACHE_1_4K_MEM0",
6323         "UTC_VML2_BANK_CACHE_1_4K_MEM1",
6324         "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
6325         "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
6326         "UTC_VML2_BANK_CACHE_2_4K_MEM0",
6327         "UTC_VML2_BANK_CACHE_2_4K_MEM1",
6328         "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
6329         "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
6330         "UTC_VML2_BANK_CACHE_3_4K_MEM0",
6331         "UTC_VML2_BANK_CACHE_3_4K_MEM1",
6332 };
6333
6334 static const char *vml2_walker_mems[] = {
6335         "UTC_VML2_CACHE_PDE0_MEM0",
6336         "UTC_VML2_CACHE_PDE0_MEM1",
6337         "UTC_VML2_CACHE_PDE1_MEM0",
6338         "UTC_VML2_CACHE_PDE1_MEM1",
6339         "UTC_VML2_CACHE_PDE2_MEM0",
6340         "UTC_VML2_CACHE_PDE2_MEM1",
6341         "UTC_VML2_RDIF_LOG_FIFO",
6342 };
6343
6344 static const char *atc_l2_cache_2m_mems[] = {
6345         "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
6346         "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
6347         "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
6348         "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
6349 };
6350
6351 static const char *atc_l2_cache_4k_mems[] = {
6352         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
6353         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
6354         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
6355         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
6356         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
6357         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
6358         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
6359         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
6360         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
6361         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
6362         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
6363         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
6364         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
6365         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
6366         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
6367         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
6368         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
6369         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
6370         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
6371         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
6372         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
6373         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
6374         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
6375         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
6376         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
6377         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
6378         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
6379         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
6380         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
6381         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
6382         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
6383         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
6384 };
6385
6386 static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
6387                                          struct ras_err_data *err_data)
6388 {
6389         uint32_t i, data;
6390         uint32_t sec_count, ded_count;
6391
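        /*
         * Zero the UTC EDC counters up front, then walk each memory
         * instance: write its index to the *_INDEX register and read the
         * matching *_CNT register for SEC/DED counts.  The index
         * selectors are parked back at 255 once the scan is done.
         */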
6392         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6393         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6394         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6395         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6396         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6397         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6398         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6399         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6400
6401         for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6402                 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6403                 data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6404
6405                 sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
6406                 if (sec_count) {
6407                         DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
6408                                  vml2_mems[i], sec_count);
6409                         err_data->ce_count += sec_count;
6410                 }
6411
6412                 ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
6413                 if (ded_count) {
6414                         DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
6415                                  vml2_mems[i], ded_count);
6416                         err_data->ue_count += ded_count;
6417                 }
6418         }
6419
6420         for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6421                 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6422                 data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6423
6424                 sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6425                                                 SEC_COUNT);
6426                 if (sec_count) {
6427                         DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
6428                                  vml2_walker_mems[i], sec_count);
6429                         err_data->ce_count += sec_count;
6430                 }
6431
6432                 ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6433                                                 DED_COUNT);
6434                 if (ded_count) {
6435                         DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
6436                                  vml2_walker_mems[i], ded_count);
6437                         err_data->ue_count += ded_count;
6438                 }
6439         }
6440
6441         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6442                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6443                 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6444
6445                 sec_count = (data & 0x00006000L) >> 0xd; /* SEC count: bits [14:13] */
6446                 if (sec_count) {
6447                         DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
6448                                  atc_l2_cache_2m_mems[i], sec_count);
6449                         err_data->ce_count += sec_count;
6450                 }
6451         }
6452
6453         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6454                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6455                 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6456
6457                 sec_count = (data & 0x00006000L) >> 0xd; /* SEC count: bits [14:13] */
6458                 if (sec_count) {
6459                         DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
6460                                  atc_l2_cache_4k_mems[i], sec_count);
6461                         err_data->ce_count += sec_count;
6462                 }
6463
6464                 ded_count = (data & 0x00018000L) >> 0xf; /* DED count: bits [16:15] */
6465                 if (ded_count) {
6466                         DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
6467                                  atc_l2_cache_4k_mems[i], ded_count);
6468                         err_data->ue_count += ded_count;
6469                 }
6470         }
6471
6472         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6473         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6474         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6475         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6476
6477         return 0;
6478 }
6479
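/*
 * Decode one EDC counter register value: scan gfx_v9_0_ras_fields for
 * every field that lives in this register (several sub-blocks can share
 * one register, e.g. all of the SQ_* entries decode mmSQ_EDC_CNT) and
 * accumulate the SEC/DED counts extracted via each field's mask/shift.
 */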
6480 static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
6481         uint32_t se_id, uint32_t inst_id, uint32_t value,
6482         uint32_t *sec_count, uint32_t *ded_count)
6483 {
6484         uint32_t i;
6485         uint32_t sec_cnt, ded_cnt;
6486
6487         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
6488                 if (gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
6489                         gfx_v9_0_ras_fields[i].seg != reg->seg ||
6490                         gfx_v9_0_ras_fields[i].inst != reg->inst)
6491                         continue;
6492
6493                 sec_cnt = (value &
6494                                 gfx_v9_0_ras_fields[i].sec_count_mask) >>
6495                                 gfx_v9_0_ras_fields[i].sec_count_shift;
6496                 if (sec_cnt) {
6497                         DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
6498                                 gfx_v9_0_ras_fields[i].name,
6499                                 se_id, inst_id,
6500                                 sec_cnt);
6501                         *sec_count += sec_cnt;
6502                 }
6503
6504                 ded_cnt = (value &
6505                                 gfx_v9_0_ras_fields[i].ded_count_mask) >>
6506                                 gfx_v9_0_ras_fields[i].ded_count_shift;
6507                 if (ded_cnt) {
6508                         DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
6509                                 gfx_v9_0_ras_fields[i].name,
6510                                 se_id, inst_id,
6511                                 ded_cnt);
6512                         *ded_count += ded_cnt;
6513                 }
6514         }
6515
6516         return 0;
6517 }
6518
6519 static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
6520 {
6521         int i, j, k;
6522
6523         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6524                 return;
6525
6526         /* read back registers to clear the counters */
6527         mutex_lock(&adev->grbm_idx_mutex);
6528         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6529                 for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6530                         for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6531                                 gfx_v9_0_select_se_sh(adev, j, 0x0, k);
6532                                 RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6533                         }
6534                 }
6535         }
6536         WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000); /* restore SE/SH/instance broadcast */
6537         mutex_unlock(&adev->grbm_idx_mutex);
6538
6539         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6540         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6541         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6542         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6543         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6544         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6545         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6546         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6547
6548         for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6549                 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6550                 RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6551         }
6552
6553         for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6554                 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6555                 RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6556         }
6557
6558         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6559                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6560                 RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6561         }
6562
6563         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6564                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6565                 RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6566         }
6567
6568         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6569         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6570         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6571         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6572 }
6573
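/*
 * Report the accumulated correctable (SEC) and uncorrectable (DED)
 * error counts: read every EDC counter register for each SE/instance
 * combination under grbm_idx_mutex, decode the non-zero values with
 * gfx_v9_0_ras_error_count(), then fold in the UTC (VML2/ATC L2)
 * counters via gfx_v9_0_query_utc_edc_status().
 */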
6574 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
6575                                           void *ras_error_status)
6576 {
6577         struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
6578         uint32_t sec_count = 0, ded_count = 0;
6579         uint32_t i, j, k;
6580         uint32_t reg_value;
6581
6582         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6583                 return -EINVAL;
6584
6585         err_data->ue_count = 0;
6586         err_data->ce_count = 0;
6587
6588         mutex_lock(&adev->grbm_idx_mutex);
6589
6590         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6591                 for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6592                         for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6593                                 gfx_v9_0_select_se_sh(adev, j, 0, k);
6594                                 reg_value =
6595                                         RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6596                                 if (reg_value)
6597                                         gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i],
6598                                                         j, k, reg_value,
6599                                                         &sec_count, &ded_count);
6600                         }
6601                 }
6602         }
6603
6604         err_data->ce_count += sec_count;
6605         err_data->ue_count += ded_count;
6606
6607         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6608         mutex_unlock(&adev->grbm_idx_mutex);
6609
6610         gfx_v9_0_query_utc_edc_status(adev, err_data);
6611
6612         return 0;
6613 }
6614
6615 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
6616         .name = "gfx_v9_0",
6617         .early_init = gfx_v9_0_early_init,
6618         .late_init = gfx_v9_0_late_init,
6619         .sw_init = gfx_v9_0_sw_init,
6620         .sw_fini = gfx_v9_0_sw_fini,
6621         .hw_init = gfx_v9_0_hw_init,
6622         .hw_fini = gfx_v9_0_hw_fini,
6623         .suspend = gfx_v9_0_suspend,
6624         .resume = gfx_v9_0_resume,
6625         .is_idle = gfx_v9_0_is_idle,
6626         .wait_for_idle = gfx_v9_0_wait_for_idle,
6627         .soft_reset = gfx_v9_0_soft_reset,
6628         .set_clockgating_state = gfx_v9_0_set_clockgating_state,
6629         .set_powergating_state = gfx_v9_0_set_powergating_state,
6630         .get_clockgating_state = gfx_v9_0_get_clockgating_state,
6631 };
6632
6633 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
6634         .type = AMDGPU_RING_TYPE_GFX,
6635         .align_mask = 0xff,
6636         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6637         .support_64bit_ptrs = true,
6638         .vmhub = AMDGPU_GFXHUB_0,
6639         .get_rptr = gfx_v9_0_ring_get_rptr_gfx,
6640         .get_wptr = gfx_v9_0_ring_get_wptr_gfx,
6641         .set_wptr = gfx_v9_0_ring_set_wptr_gfx,
6642         .emit_frame_size = /* 242 dwords maximum if 16 IBs */
6643                 5 +  /* COND_EXEC */
6644                 7 +  /* PIPELINE_SYNC */
6645                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6646                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6647                 2 + /* VM_FLUSH */
6648                 8 +  /* FENCE for VM_FLUSH */
6649                 20 + /* GDS switch */
6650                 4 + /* double SWITCH_BUFFER,
6651                        the first COND_EXEC jumps to the place just
6652                        prior to this double SWITCH_BUFFER */
6653                 5 + /* COND_EXEC */
6654                 7 + /* HDP_flush */
6655                 4 + /* VGT_flush */
6656                 14 + /* CE_META */
6657                 31 + /* DE_META */
6658                 3 + /* CNTX_CTRL */
6659                 5 + /* HDP_INVL */
6660                 8 + 8 + /* FENCE x2 */
6661                 2, /* SWITCH_BUFFER */
6662         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
6663         .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
6664         .emit_fence = gfx_v9_0_ring_emit_fence,
6665         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6666         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6667         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6668         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6669         .test_ring = gfx_v9_0_ring_test_ring,
6670         .test_ib = gfx_v9_0_ring_test_ib,
6671         .insert_nop = amdgpu_ring_insert_nop,
6672         .pad_ib = amdgpu_ring_generic_pad_ib,
6673         .emit_switch_buffer = gfx_v9_ring_emit_sb,
6674         .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
6675         .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
6676         .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
6677         .emit_tmz = gfx_v9_0_ring_emit_tmz,
6678         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6679         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6680         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6681         .soft_recovery = gfx_v9_0_ring_soft_recovery,
6682 };
6683
6684 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
6685         .type = AMDGPU_RING_TYPE_COMPUTE,
6686         .align_mask = 0xff,
6687         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6688         .support_64bit_ptrs = true,
6689         .vmhub = AMDGPU_GFXHUB_0,
6690         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
6691         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
6692         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
6693         .emit_frame_size =
6694                 20 + /* gfx_v9_0_ring_emit_gds_switch */
6695                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
6696                 5 + /* hdp invalidate */
6697                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6698                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6699                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6700                 2 + /* gfx_v9_0_ring_emit_vm_flush */
6701                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
6702         .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
6703         .emit_ib = gfx_v9_0_ring_emit_ib_compute,
6704         .emit_fence = gfx_v9_0_ring_emit_fence,
6705         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6706         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6707         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6708         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6709         .test_ring = gfx_v9_0_ring_test_ring,
6710         .test_ib = gfx_v9_0_ring_test_ib,
6711         .insert_nop = amdgpu_ring_insert_nop,
6712         .pad_ib = amdgpu_ring_generic_pad_ib,
6713         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6714         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6715         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6716 };
6717
6718 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
6719         .type = AMDGPU_RING_TYPE_KIQ,
6720         .align_mask = 0xff,
6721         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6722         .support_64bit_ptrs = true,
6723         .vmhub = AMDGPU_GFXHUB_0,
6724         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
6725         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
6726         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
6727         .emit_frame_size =
6728                 20 + /* gfx_v9_0_ring_emit_gds_switch */
6729                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
6730                 5 + /* hdp invalidate */
6731                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6732                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6733                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6734                 2 + /* gfx_v9_0_ring_emit_vm_flush */
6735                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
6736         .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
6737         .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
6738         .test_ring = gfx_v9_0_ring_test_ring,
6739         .insert_nop = amdgpu_ring_insert_nop,
6740         .pad_ib = amdgpu_ring_generic_pad_ib,
6741         .emit_rreg = gfx_v9_0_ring_emit_rreg,
6742         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6743         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6744         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6745 };
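/*
 * Note that the KIQ ring has no emit_ib/test_ib hooks: it only carries
 * driver-generated packets (register access, queue map/unmap), never
 * user IBs.
 */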
6746
6747 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
6748 {
6749         int i;
6750
6751         adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
6752
6753         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
6754                 adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
6755
6756         for (i = 0; i < adev->gfx.num_compute_rings; i++)
6757                 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
6758 }
6759
6760 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
6761         .set = gfx_v9_0_set_eop_interrupt_state,
6762         .process = gfx_v9_0_eop_irq,
6763 };
6764
6765 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
6766         .set = gfx_v9_0_set_priv_reg_fault_state,
6767         .process = gfx_v9_0_priv_reg_irq,
6768 };
6769
6770 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
6771         .set = gfx_v9_0_set_priv_inst_fault_state,
6772         .process = gfx_v9_0_priv_inst_irq,
6773 };
6774
6775 static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
6776         .set = gfx_v9_0_set_cp_ecc_error_state,
6777         .process = amdgpu_gfx_cp_ecc_error_irq,
6778 };
6779
6781 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
6782 {
6783         adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
6784         adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
6785
6786         adev->gfx.priv_reg_irq.num_types = 1;
6787         adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
6788
6789         adev->gfx.priv_inst_irq.num_types = 1;
6790         adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
6791
6792         adev->gfx.cp_ecc_error_irq.num_types = 2; /* C5 ECC error and C9 FUE error */
6793         adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
6794 }
6795
6796 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
6797 {
6798         switch (adev->asic_type) {
6799         case CHIP_VEGA10:
6800         case CHIP_VEGA12:
6801         case CHIP_VEGA20:
6802         case CHIP_RAVEN:
6803         case CHIP_ARCTURUS:
6804         case CHIP_RENOIR:
6805                 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
6806                 break;
6807         default:
6808                 break;
6809         }
6810 }
6811
6812 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
6813 {
6814         /* init asic gds info */
6815         switch (adev->asic_type) {
6816         case CHIP_VEGA10:
6817         case CHIP_VEGA12:
6818         case CHIP_VEGA20:
6819                 adev->gds.gds_size = 0x10000;
6820                 break;
6821         case CHIP_RAVEN:
6822         case CHIP_ARCTURUS:
6823                 adev->gds.gds_size = 0x1000;
6824                 break;
6825         default:
6826                 adev->gds.gds_size = 0x10000;
6827                 break;
6828         }
6829
6830         switch (adev->asic_type) {
6831         case CHIP_VEGA10:
6832         case CHIP_VEGA20:
6833                 adev->gds.gds_compute_max_wave_id = 0x7ff;
6834                 break;
6835         case CHIP_VEGA12:
6836                 adev->gds.gds_compute_max_wave_id = 0x27f;
6837                 break;
6838         case CHIP_RAVEN:
6839                 if (adev->rev_id >= 0x8)
6840                         adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
6841                 else
6842                         adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
6843                 break;
6844         case CHIP_ARCTURUS:
6845                 adev->gds.gds_compute_max_wave_id = 0xfff;
6846                 break;
6847         default:
6848                 /* this really depends on the chip */
6849                 adev->gds.gds_compute_max_wave_id = 0x7ff;
6850                 break;
6851         }
6852
6853         adev->gds.gws_size = 64;
6854         adev->gds.oa_size = 16;
6855 }
6856
6857 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
6858                                                  u32 bitmap)
6859 {
6860         u32 data;
6861
6862         if (!bitmap)
6863                 return;
6864
6865         data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
6866         data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
6867
6868         WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
6869 }
6870
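/*
 * CC_GC_SHADER_ARRAY_CONFIG (hardware-disabled CUs) and
 * GC_USER_SHADER_ARRAY_CONFIG (driver-disabled CUs) both carry
 * INACTIVE_CUS masks; OR-ing them and inverting the result yields the
 * CUs actually usable in the currently selected SE/SH.
 */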
6871 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
6872 {
6873         u32 data, mask;
6874
6875         data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
6876         data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
6877
6878         data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
6879         data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
6880
6881         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
6882
6883         return (~data) & mask;
6884 }
6885
6886 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
6887                                  struct amdgpu_cu_info *cu_info)
6888 {
6889         int i, j, k, counter, active_cu_number = 0;
6890         u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
6891         unsigned disable_masks[4 * 4];
6892
6893         if (!adev || !cu_info)
6894                 return -EINVAL;
6895
6896         /*
6897          * 16 comes from the 4*4 bitmap array size, which covers all gfx9 ASICs.
6898          */
6899         if (adev->gfx.config.max_shader_engines *
6900                 adev->gfx.config.max_sh_per_se > 16)
6901                 return -EINVAL;
6902
6903         amdgpu_gfx_parse_disable_cu(disable_masks,
6904                                     adev->gfx.config.max_shader_engines,
6905                                     adev->gfx.config.max_sh_per_se);
6906
6907         mutex_lock(&adev->grbm_idx_mutex);
6908         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
6909                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
6910                         mask = 1;
6911                         ao_bitmap = 0;
6912                         counter = 0;
6913                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
6914                         gfx_v9_0_set_user_cu_inactive_bitmap(
6915                                 adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
6916                         bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
6917
6918                         /*
6919                          * The bitmap (and ao_cu_bitmap) in the cu_info
6920                          * structure is a 4x4 array, which suits Vega
6921                          * ASICs and their 4*2 SE/SH layout.
6922                          * For Arcturus, the SE/SH layout changed to 8*1,
6923                          * so to minimize the impact we fold it into the
6924                          * existing bitmap array as below:
6925                          *    SE4,SH0 --> bitmap[0][1]
6926                          *    SE5,SH0 --> bitmap[1][1]
6927                          *    SE6,SH0 --> bitmap[2][1]
6928                          *    SE7,SH0 --> bitmap[3][1]
6929                          */
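                        /*
                         * Worked example for Arcturus' SE5 (i = 5, j = 0):
                         * i % 4 = 1 and j + i / 4 = 1, i.e. bitmap[1][1],
                         * matching the table above.
                         */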
6930                         cu_info->bitmap[i % 4][j + i / 4] = bitmap;
6931
6932                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
6933                                 if (bitmap & mask) {
6934                                         if (counter < adev->gfx.config.max_cu_per_sh)
6935                                                 ao_bitmap |= mask;
6936                                         counter++;
6937                                 }
6938                                 mask <<= 1;
6939                         }
6940                         active_cu_number += counter;
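                        /*
                         * ao_cu_mask is only 32 bits wide, so just the
                         * SE0/SE1 x SH0/SH1 quadrant fits, 8 bits per
                         * SE/SH pair; hence the i < 2 && j < 2 guard.
                         */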
6941                         if (i < 2 && j < 2)
6942                                 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
6943                         cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
6944                 }
6945         }
6946         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6947         mutex_unlock(&adev->grbm_idx_mutex);
6948
6949         cu_info->number = active_cu_number;
6950         cu_info->ao_cu_mask = ao_cu_mask;
6951         cu_info->simd_per_cu = NUM_SIMD_PER_CU;
6952
6953         return 0;
6954 }
6955
6956 const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
6957 {
6958         .type = AMD_IP_BLOCK_TYPE_GFX,
6959         .major = 9,
6960         .minor = 0,
6961         .rev = 0,
6962         .funcs = &gfx_v9_0_ip_funcs,
6963 };
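/*
 * gfx_v9_0_ip_block is registered by the SoC code (soc15.c) through
 * amdgpu_device_ip_block_add(); from then on the amd_ip_funcs above
 * drive this IP block's init/fini and power state transitions.
 */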