Commit | Line | Data |
---|---|---|
220ab9bd KW |
1 | /* |
2 | * Copyright 2016 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | */ | |
23 | #include <linux/firmware.h> | |
24 | #include <linux/slab.h> | |
25 | #include <linux/module.h> | |
248a1d6f | 26 | #include <drm/drmP.h> |
220ab9bd | 27 | #include "amdgpu.h" |
d05da0e2 | 28 | #include "amdgpu_atombios.h" |
220ab9bd KW |
29 | #include "amdgpu_ih.h" |
30 | #include "amdgpu_uvd.h" | |
31 | #include "amdgpu_vce.h" | |
32 | #include "amdgpu_ucode.h" | |
33 | #include "amdgpu_psp.h" | |
34 | #include "atom.h" | |
35 | #include "amd_pcie.h" | |
36 | ||
5d735f83 | 37 | #include "uvd/uvd_7_0_offset.h" |
cde5c34f FX |
38 | #include "gc/gc_9_0_offset.h" |
39 | #include "gc/gc_9_0_sh_mask.h" | |
812f77b7 FX |
40 | #include "sdma0/sdma0_4_0_offset.h" |
41 | #include "sdma1/sdma1_4_0_offset.h" | |
75199b8c FX |
42 | #include "hdp/hdp_4_0_offset.h" |
43 | #include "hdp/hdp_4_0_sh_mask.h" | |
424d9bb4 FX |
44 | #include "smuio/smuio_9_0_offset.h" |
45 | #include "smuio/smuio_9_0_sh_mask.h" | |
220ab9bd KW |
46 | |
47 | #include "soc15.h" | |
48 | #include "soc15_common.h" | |
49 | #include "gfx_v9_0.h" | |
50 | #include "gmc_v9_0.h" | |
51 | #include "gfxhub_v1_0.h" | |
52 | #include "mmhub_v1_0.h" | |
070706c0 | 53 | #include "df_v1_7.h" |
220ab9bd KW |
54 | #include "vega10_ih.h" |
55 | #include "sdma_v4_0.h" | |
56 | #include "uvd_v7_0.h" | |
57 | #include "vce_v4_0.h" | |
f2d7e707 | 58 | #include "vcn_v1_0.h" |
796b6568 | 59 | #include "dce_virtual.h" |
f1a34465 | 60 | #include "mxgpu_ai.h" |
220ab9bd | 61 | |
220ab9bd KW |
62 | #define mmMP0_MISC_CGTT_CTRL0 0x01b9 |
63 | #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0 | |
64 | #define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba | |
65 | #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0 | |
66 | ||
67 | /* | |
68 | * Indirect registers accessor | |
69 | */ | |
/*
 * soc15_pcie_rreg - read an indirect PCIE register
 *
 * Selects the register by writing its offset to the NBIO PCIE index
 * register, then reads it back through the paired data register.
 * The index/data pair location is NBIO-version specific.
 */
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;
	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address); /* read back to flush the posted index write */
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
84 | ||
/*
 * soc15_pcie_wreg - write an indirect PCIE register
 *
 * Mirror of soc15_pcie_rreg(): program the index register, then write
 * the value through the data register, flushing each posted write with
 * a read-back so the accesses reach the hardware in order.
 */
static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address); /* flush posted index write */
	WREG32(data, v);
	(void)RREG32(data); /* flush posted data write */
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
99 | ||
/*
 * soc15_uvd_ctx_rreg - read a UVD context register via the
 * UVD_CTX_INDEX/UVD_CTX_DATA indirect pair. The offset is masked to
 * its low 9 bits before being written to the index register.
 */
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}
114 | ||
/*
 * soc15_uvd_ctx_wreg - write a UVD context register via the
 * UVD_CTX_INDEX/UVD_CTX_DATA indirect pair (offset masked to 9 bits).
 */
static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}
127 | ||
/*
 * soc15_didt_rreg - read a DIDT register through the
 * DIDT_IND_INDEX/DIDT_IND_DATA indirect pair, serialized by
 * didt_idx_lock.
 */
static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}
142 | ||
/*
 * soc15_didt_wreg - write a DIDT register through the
 * DIDT_IND_INDEX/DIDT_IND_DATA indirect pair, serialized by
 * didt_idx_lock.
 */
static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
155 | ||
/*
 * soc15_gc_cac_rreg - read a GC CAC register through the
 * GC_CAC_IND_INDEX/GC_CAC_IND_DATA indirect pair, serialized by
 * gc_cac_idx_lock.
 */
static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}
167 | ||
/*
 * soc15_gc_cac_wreg - write a GC CAC register through the
 * GC_CAC_IND_INDEX/GC_CAC_IND_DATA indirect pair.
 */
static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
177 | ||
/*
 * soc15_se_cac_rreg - read an SE CAC register through the
 * SE_CAC_IND_INDEX/SE_CAC_IND_DATA indirect pair, serialized by
 * se_cac_idx_lock.
 */
static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}
189 | ||
/*
 * soc15_se_cac_wreg - write an SE CAC register through the
 * SE_CAC_IND_INDEX/SE_CAC_IND_DATA indirect pair.
 */
static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}
199 | ||
/* Return the configured VRAM size as reported by the NBIO block. */
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_memsize(adev);
}
204 | ||
220ab9bd KW |
205 | static u32 soc15_get_xclk(struct amdgpu_device *adev) |
206 | { | |
76d6172b | 207 | return adev->clock.spll.reference_freq; |
220ab9bd KW |
208 | } |
209 | ||
210 | ||
/*
 * soc15_grbm_select - steer subsequent GRBM register accesses to a
 * specific ME/pipe/queue/VMID by programming GRBM_GFX_CNTL.
 * Callers are expected to hold the appropriate serialization (e.g.
 * srbm/grbm mutex) around the select + access + deselect sequence.
 */
void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}
222 | ||
/* Enable/disable VGA render and crtc access; not implemented on SOC15 yet. */
static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
227 | ||
/*
 * Try to fetch the VBIOS while the BIOS ROM is disabled; not implemented
 * on SOC15, so always report failure and let the caller fall back to
 * another VBIOS discovery method.
 */
static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
233 | ||
/*
 * soc15_read_bios_from_rom - copy the VBIOS image out of the SMUIO ROM.
 *
 * Returns true on success, false if the buffer/length is invalid or the
 * part is an APU (APU VBIOS lives inside the system BIOS image instead).
 *
 * NOTE(review): the copy length is rounded up to whole dwords, so @bios
 * must be at least ALIGN(@length_bytes, 4) bytes — confirm at call sites.
 */
static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data; ROM_DATA auto-increments the index */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}
259 | ||
/*
 * One register that userspace may read via soc15_read_register():
 * identified by hw IP block, instance, address-space segment and offset,
 * plus whether the read must go through GRBM SE/SH indexing.
 */
struct soc15_allowed_register_entry {
	uint32_t hwip;
	uint32_t inst;
	uint32_t seg;
	uint32_t reg_offset;
	bool grbm_indexed;
};
268 | ||
/* Whitelist of registers exposed to userspace through soc15_read_register(). */
static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};
290 | ||
/*
 * soc15_read_indexed_register - read a register under a specific SE/SH
 * GRBM selection (0xffffffff means broadcast/no selection). Restores
 * broadcast mode afterwards; grbm_idx_mutex serializes the select.
 */
static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}
307 | ||
/*
 * soc15_get_register_value - fetch a whitelisted register value.
 *
 * GRBM-indexed registers go through the SE/SH-selected read path.
 * GB_ADDR_CONFIG and DB_DEBUG2 return the driver's cached copies
 * (they may be reprogrammed at init time) instead of a raw MMIO read.
 */
static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
			return adev->gfx.config.db_debug2;
		return RREG32(reg_offset);
	}
}
322 | ||
220ab9bd KW |
323 | static int soc15_read_register(struct amdgpu_device *adev, u32 se_num, |
324 | u32 sh_num, u32 reg_offset, u32 *value) | |
325 | { | |
3032f350 | 326 | uint32_t i; |
946a4d5b | 327 | struct soc15_allowed_register_entry *en; |
220ab9bd KW |
328 | |
329 | *value = 0; | |
220ab9bd | 330 | for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) { |
946a4d5b SL |
331 | en = &soc15_allowed_read_registers[i]; |
332 | if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] | |
333 | + en->reg_offset)) | |
220ab9bd KW |
334 | continue; |
335 | ||
97fcc76b CK |
336 | *value = soc15_get_register_value(adev, |
337 | soc15_allowed_read_registers[i].grbm_indexed, | |
338 | se_num, sh_num, reg_offset); | |
220ab9bd KW |
339 | return 0; |
340 | } | |
341 | return -EINVAL; | |
342 | } | |
343 | ||
946a4d5b SL |
344 | |
/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: number of entries in the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */

void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			/* full mask: plain write, no read-modify-write needed */
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= entry->or_mask;
		}
		WREG32(reg, tmp);
	}

}
379 | ||
380 | ||
/*
 * soc15_asic_reset - full ASIC reset via the PSP.
 *
 * Marks the engine hung in the atombios scratch registers, disables bus
 * mastering, saves PCI config space, triggers the PSP mode-1 reset, then
 * restores config space and polls the NBIO memsize register until it
 * reads something other than 0xffffffff (i.e. the chip responds again).
 *
 * Always returns 0; the poll loop times out silently after
 * adev->usec_timeout iterations.
 */
static int soc15_asic_reset(struct amdgpu_device *adev)
{
	u32 i;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	psp_gpu_reset(adev);

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio_funcs->get_memsize(adev);

		/* all-ones means the register space is still dead */
		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return 0;
}
411 | ||
412 | /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock, | |
413 | u32 cntl_reg, u32 status_reg) | |
414 | { | |
415 | return 0; | |
416 | }*/ | |
417 | ||
/*
 * soc15_set_uvd_clocks - set UVD vclk/dclk; not implemented on SOC15
 * (the commented-out skeleton above sketches the intended per-clock
 * helper). Returns 0 unconditionally.
 */
static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}
430 | ||
/* Set VCE evclk/ecclk; not implemented on SOC15 — reports success. */
static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}
437 | ||
/*
 * soc15_pcie_gen3_enable - switch the PCIe link to gen2/gen3 speed.
 *
 * Bails out when the GPU sits on a root bus, when the user disabled the
 * speed change (amdgpu_pcie_gen2 == 0), on APUs (no PCIe link), or when
 * the platform advertises neither gen2 nor gen3 support. The actual
 * speed-change programming is still a todo.
 */
static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}
455 | ||
/*
 * soc15_program_aspm - program PCIe ASPM (active state power management).
 * Skipped entirely when disabled via the amdgpu_aspm module parameter;
 * the actual programming is still a todo.
 */
static void soc15_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
464 | ||
/*
 * Enable/disable both the main doorbell aperture and the self-ring
 * aperture through the NBIO callbacks.
 */
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}
471 | ||
/* IP block descriptor for the SOC15 "common" block (v2.0.0). */
static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};
480 | ||
/*
 * soc15_set_ip_blocks - register the per-ASIC IP blocks and callbacks.
 *
 * Initializes the register base offsets first (required before any HW
 * register access), selects NBIO/DF callback tables, detects
 * virtualization, and then adds the IP blocks in initialization order:
 * common, GMC, IH, PSP, SMU, display, GFX, SDMA, media engines.
 *
 * Returns 0 on success or -EINVAL for an unknown ASIC type.
 */
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	/* APUs (Raven) use NBIO 7.0, dGPUs use NBIO 6.1 */
	if (adev->flags & AMD_IS_APU)
		adev->nbio_funcs = &nbio_v7_0_funcs;
	else
		adev->nbio_funcs = &nbio_v6_1_funcs;

	adev->df_funcs = &df_v1_7_funcs;
	adev->nbio_funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		/* under SR-IOV the host owns the SMU */
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		/* Raven uses VCN instead of UVD+VCE */
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
551 | ||
/* Return the silicon revision id as exposed by the NBIO block. */
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}
556 | ||
/* Flush the HDP write cache, optionally via the given ring (NBIO-specific). */
static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio_funcs->hdp_flush(adev, ring);
}
561 | ||
/*
 * soc15_invalidate_hdp - invalidate the HDP read cache.
 *
 * Uses a direct MMIO write when no ring (or no emit_wreg support) is
 * available; otherwise emits the register write into the ring so it is
 * ordered with the ring's other commands.
 */
static void soc15_invalidate_hdp(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	else
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
571 | ||
adbd4f89 AD |
572 | static bool soc15_need_full_reset(struct amdgpu_device *adev) |
573 | { | |
574 | /* change this when we implement soft reset */ | |
575 | return true; | |
576 | } | |
577 | ||
220ab9bd KW |
/* ASIC-level callback table shared by all SOC15 parts. */
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
};
593 | ||
/*
 * soc15_common_early_init - early init of the common IP block.
 *
 * Installs the indirect register accessors and the ASIC function table,
 * reads the revision id, and sets the per-ASIC clock-gating (cg_flags)
 * and power-gating (pg_flags) feature masks plus the external revision
 * id used by userspace. Returns -EINVAL for unsupported ASICs.
 */
static int soc15_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SOC15 has no SMC indirect register space */
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->asic_funcs = &soc15_asic_funcs;

	adev->rev_id = soc15_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_VEGA12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_VEGA20:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_RAVEN:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA;

		/* GFX powergating features depend on the gfxoff feature bit */
		if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_RLC_SMU_HS;

		adev->external_rev_id = 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
707 | ||
81758c55 ML |
/* Late init: under SR-IOV, hook up the VF's mailbox interrupt. */
static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	return 0;
}
717 | ||
220ab9bd KW |
/* SW init: under SR-IOV, register the mailbox interrupt source id. */
static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	return 0;
}
727 | ||
/* SW fini: nothing allocated at sw_init time needs tearing down. */
static int soc15_common_sw_fini(void *handle)
{
	return 0;
}
732 | ||
/*
 * soc15_common_hw_init - bring up common hardware state:
 * PCIe link speed, ASPM, NBIO registers, doorbell aperture.
 */
static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio_funcs->init_registers(adev);
	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);

	return 0;
}
748 | ||
/*
 * soc15_common_hw_fini - tear down common hardware state: disable the
 * doorbell aperture and, under SR-IOV, release the mailbox interrupt.
 */
static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	return 0;
}
760 | ||
/* Suspend simply routes through the common hw teardown path. */
static int soc15_common_suspend(void *handle)
{
	return soc15_common_hw_fini(handle);
}
767 | ||
/* Resume simply routes through the common hw bring-up path. */
static int soc15_common_resume(void *handle)
{
	return soc15_common_hw_init(handle);
}
774 | ||
/* The common block has no busy state to report; always idle. */
static bool soc15_common_is_idle(void *handle)
{
	return true;
}
779 | ||
/* Nothing to wait for in the common block; report idle immediately. */
static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}
784 | ||
/* Soft reset is not implemented for the common block; report success. */
static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
789 | ||
/*
 * soc15_update_hdp_light_sleep - toggle HDP memory light sleep.
 * Read-modify-write of HDP_MEM_POWER_LS, gated by the HDP_LS cg flag;
 * only writes back when the value actually changes.
 */
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
}
804 | ||
/*
 * soc15_update_drm_clock_gating - toggle DRM (MP0) medium grain clock
 * gating via MP0_MISC_CGTT_CTRL0. Clearing bits 24-31 enables gating,
 * setting them forces the clocks on.
 *
 * NOTE(review): raw hex masks are used because no sh_mask header for
 * MP0_MISC_CGTT_CTRL0 is included here — presumably these are the
 * soft-override bits; confirm against the register spec.
 */
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}
833 | ||
/*
 * soc15_update_drm_light_sleep - toggle DRM (MP0) light sleep via bit 0
 * of MP0_MISC_LIGHT_SLEEP_CTRL, gated by the DRM_LS cg flag; only
 * writes back when the value changes.
 */
static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}
848 | ||
/*
 * soc15_update_rom_medium_grain_clock_gating - toggle ROM MGCG by
 * clearing (enable) or setting (disable) the CGTT_ROM_CLK_CTRL0
 * soft-override bits; only writes back when the value changes.
 */
static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}
866 | ||
220ab9bd KW |
867 | static int soc15_common_set_clockgating_state(void *handle, |
868 | enum amd_clockgating_state state) | |
869 | { | |
870 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
871 | ||
6e9dc861 ML |
872 | if (amdgpu_sriov_vf(adev)) |
873 | return 0; | |
874 | ||
220ab9bd KW |
875 | switch (adev->asic_type) { |
876 | case CHIP_VEGA10: | |
692069a1 | 877 | case CHIP_VEGA12: |
f980d127 | 878 | case CHIP_VEGA20: |
bf383fb6 | 879 | adev->nbio_funcs->update_medium_grain_clock_gating(adev, |
220ab9bd | 880 | state == AMD_CG_STATE_GATE ? true : false); |
bf383fb6 | 881 | adev->nbio_funcs->update_medium_grain_light_sleep(adev, |
220ab9bd KW |
882 | state == AMD_CG_STATE_GATE ? true : false); |
883 | soc15_update_hdp_light_sleep(adev, | |
884 | state == AMD_CG_STATE_GATE ? true : false); | |
885 | soc15_update_drm_clock_gating(adev, | |
886 | state == AMD_CG_STATE_GATE ? true : false); | |
887 | soc15_update_drm_light_sleep(adev, | |
888 | state == AMD_CG_STATE_GATE ? true : false); | |
889 | soc15_update_rom_medium_grain_clock_gating(adev, | |
890 | state == AMD_CG_STATE_GATE ? true : false); | |
070706c0 | 891 | adev->df_funcs->update_medium_grain_clock_gating(adev, |
220ab9bd KW |
892 | state == AMD_CG_STATE_GATE ? true : false); |
893 | break; | |
9e5a9eb4 | 894 | case CHIP_RAVEN: |
bf383fb6 | 895 | adev->nbio_funcs->update_medium_grain_clock_gating(adev, |
9e5a9eb4 | 896 | state == AMD_CG_STATE_GATE ? true : false); |
bf383fb6 | 897 | adev->nbio_funcs->update_medium_grain_light_sleep(adev, |
9e5a9eb4 HR |
898 | state == AMD_CG_STATE_GATE ? true : false); |
899 | soc15_update_hdp_light_sleep(adev, | |
900 | state == AMD_CG_STATE_GATE ? true : false); | |
901 | soc15_update_drm_clock_gating(adev, | |
902 | state == AMD_CG_STATE_GATE ? true : false); | |
903 | soc15_update_drm_light_sleep(adev, | |
904 | state == AMD_CG_STATE_GATE ? true : false); | |
905 | soc15_update_rom_medium_grain_clock_gating(adev, | |
906 | state == AMD_CG_STATE_GATE ? true : false); | |
907 | break; | |
220ab9bd KW |
908 | default: |
909 | break; | |
910 | } | |
911 | return 0; | |
912 | } | |
913 | ||
f9abe35c HR |
914 | static void soc15_common_get_clockgating_state(void *handle, u32 *flags) |
915 | { | |
916 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
917 | int data; | |
918 | ||
919 | if (amdgpu_sriov_vf(adev)) | |
920 | *flags = 0; | |
921 | ||
bf383fb6 | 922 | adev->nbio_funcs->get_clockgating_state(adev, flags); |
f9abe35c HR |
923 | |
924 | /* AMD_CG_SUPPORT_HDP_LS */ | |
925 | data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); | |
926 | if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK) | |
927 | *flags |= AMD_CG_SUPPORT_HDP_LS; | |
928 | ||
929 | /* AMD_CG_SUPPORT_DRM_MGCG */ | |
930 | data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0)); | |
931 | if (!(data & 0x01000000)) | |
932 | *flags |= AMD_CG_SUPPORT_DRM_MGCG; | |
933 | ||
934 | /* AMD_CG_SUPPORT_DRM_LS */ | |
935 | data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL)); | |
936 | if (data & 0x1) | |
937 | *flags |= AMD_CG_SUPPORT_DRM_LS; | |
938 | ||
939 | /* AMD_CG_SUPPORT_ROM_MGCG */ | |
940 | data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0)); | |
941 | if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK)) | |
942 | *flags |= AMD_CG_SUPPORT_ROM_MGCG; | |
943 | ||
070706c0 | 944 | adev->df_funcs->get_clockgating_state(adev, flags); |
f9abe35c HR |
945 | } |
946 | ||
220ab9bd KW |
947 | static int soc15_common_set_powergating_state(void *handle, |
948 | enum amd_powergating_state state) | |
949 | { | |
950 | /* todo */ | |
951 | return 0; | |
952 | } | |
953 | ||
954 | const struct amd_ip_funcs soc15_common_ip_funcs = { | |
955 | .name = "soc15_common", | |
956 | .early_init = soc15_common_early_init, | |
81758c55 | 957 | .late_init = soc15_common_late_init, |
220ab9bd KW |
958 | .sw_init = soc15_common_sw_init, |
959 | .sw_fini = soc15_common_sw_fini, | |
960 | .hw_init = soc15_common_hw_init, | |
961 | .hw_fini = soc15_common_hw_fini, | |
962 | .suspend = soc15_common_suspend, | |
963 | .resume = soc15_common_resume, | |
964 | .is_idle = soc15_common_is_idle, | |
965 | .wait_for_idle = soc15_common_wait_for_idle, | |
966 | .soft_reset = soc15_common_soft_reset, | |
967 | .set_clockgating_state = soc15_common_set_clockgating_state, | |
968 | .set_powergating_state = soc15_common_set_powergating_state, | |
f9abe35c | 969 | .get_clockgating_state= soc15_common_get_clockgating_state, |
220ab9bd | 970 | }; |