/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
#define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v3_0_wait_for_idle(void *handle);

/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_RPTR);
	else if (ring == &adev->vce.ring[1])
		return RREG32(mmVCE_RB_RPTR2);
	else
		return RREG32(mmVCE_RB_RPTR3);
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_WPTR);
	else if (ring == &adev->vce.ring[1])
		return RREG32(mmVCE_RB_WPTR2);
	else
		return RREG32(mmVCE_RB_WPTR3);
}

/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		WREG32(mmVCE_RB_WPTR, ring->wptr);
	else if (ring == &adev->vce.ring[1])
		WREG32(mmVCE_RB_WPTR2, ring->wptr);
	else
		WREG32(mmVCE_RB_WPTR3, ring->wptr);
}

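/*
 * Set or clear the VCE_CGTT_OVERRIDE field of VCE_RB_ARB_CTRL.  The
 * override is held while the software clock-gating registers below are
 * reprogrammed and released again afterwards.
 */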
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
}

static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 data;

	/* Set Override to disable Clock Gating */
	vce_v3_0_override_vce_clock_gating(adev, true);

	/*
	 * This function enables MGCG which is controlled by firmware.
	 * With the clocks in the gated state the core is still
	 * accessible but the firmware will throttle the clocks on the
	 * fly as necessary.
	 */
	if (gated) {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data |= 0x1ff;
		data &= ~0xef0000;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0x3ff000;
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x2;
		data &= ~0x00010000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data |= 0x37f;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			0x8;
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	} else {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data &= ~0x80010;
		data |= 0xe70008;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x10000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			  0x8);
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	}
	vce_v3_0_override_vce_clock_gating(adev, false);
}

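/**
 * vce_v3_0_firmware_loaded - poll for firmware startup
 *
 * @adev: amdgpu_device pointer
 *
 * Poll VCE_STATUS until the VCPU reports that the firmware has loaded,
 * resetting the ECPU between retries.  Returns 0 on success or
 * -ETIMEDOUT if the firmware never comes up.
 */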
static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(10);
	}

	return -ETIMEDOUT;
}

/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, r;

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	ring = &adev->vce.ring[2];
	WREG32(mmVCE_RB_RPTR3, ring->wptr);
	WREG32(mmVCE_RB_WPTR3, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
		vce_v3_0_mc_resume(adev, idx);
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);

		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(100);

		r = vce_v3_0_firmware_loaded(adev);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

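/**
 * vce_v3_0_stop - stop VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Halt the VCPU on every active VCE instance and re-enable software
 * clock gating if it is supported.
 */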
static int vce_v3_0_stop(struct amdgpu_device *adev)
{
	int idx;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);

		/* hold on ECPU */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		/* Set Clock-Gating off */
		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
			vce_v3_0_set_vce_sw_clock_gating(adev, false);
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS	0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT		27
#define VCE_HARVEST_FUSE_MACRO__MASK		0x18000000

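/**
 * vce_v3_0_get_harvest_config - query harvested VCE instances
 *
 * @adev: amdgpu_device pointer
 *
 * Returns a mask of AMDGPU_VCE_HARVEST_VCE0/VCE1 flags describing which
 * VCE instances are unavailable, based on the ASIC type and the harvest
 * fuses.
 */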
static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
	if ((adev->asic_type == CHIP_FIJI) ||
	    (adev->asic_type == CHIP_STONEY) ||
	    (adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_POLARIS11))
		return AMDGPU_VCE_HARVEST_VCE1;

	/* Tonga and CZ are dual or single pipe */
	if (adev->flags & AMD_IS_APU)
		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
		       VCE_HARVEST_FUSE_MACRO__MASK) >>
			VCE_HARVEST_FUSE_MACRO__SHIFT;
	else
		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

	switch (tmp) {
	case 1:
		return AMDGPU_VCE_HARVEST_VCE0;
	case 2:
		return AMDGPU_VCE_HARVEST_VCE1;
	case 3:
		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
	default:
		return 0;
	}
}

static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

	if ((adev->vce.harvest_config &
	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	adev->vce.num_rings = 3;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r, i;

	/* VCE */
	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
				     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
		if (r)
			return r;
	}

	return r;
}

static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vce_sw_fini(adev);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_start(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].ready = false;

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
		else
			adev->vce.ring[i].ready = true;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v3_0_hw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_wait_for_idle(handle);
	if (r)
		return r;

	return vce_v3_0_stop(adev);
}

static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	r = vce_v3_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

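/**
 * vce_v3_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @idx: VCE instance to program
 *
 * Program the LMI and VCPU cache registers so the given VCE instance
 * can fetch its firmware, stack, and data regions from the VCE BO.
 */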
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v3_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}

#define  VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK	0x00000008L   /* AUTO_BUSY */
#define  VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK	0x00000010L   /* RB0_BUSY */
#define  VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK	0x00000020L   /* RB1_BUSY */
#define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
				      VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)

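/*
 * Check whether either VCE instance reports busy in VCE_STATUS; if so,
 * record the SRBM soft-reset bits for both instances in
 * adev->vce.srbm_soft_reset and return true so a soft reset is scheduled.
 */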
static bool vce_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/*
	 * According to the VCE team, we should use VCE_STATUS instead of
	 * the SRBM_STATUS.VCE_BUSY bit for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
	 * instance's registers are accessed
	 * (0 for 1st instance, 0x10 for 2nd instance).
	 *
	 * VCE_STATUS
	 * |UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 * |----+----+-----------+----+----+----+----------+---------+----|
	 * |bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * The VCE team suggests using bits 3--6 for the busy status check.
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (srbm_soft_reset) {
		adev->vce.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->vce.srbm_soft_reset = 0;
		return false;
	}
}

static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->vce.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->vce.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int vce_v3_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_suspend(adev);
}

static int vce_v3_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_resume(adev);
}

static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);

	switch (entry->src_data) {
	case 0:
	case 1:
	case 2:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

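/*
 * Toggle the BYPASSECLK bit in GCK_DFS_BYPASS_CNTL.  Called from
 * vce_v3_0_set_clockgating_state() on the ASICs that need ECLK bypass
 * when VCE clock gating is enabled.
 */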
static void vce_v3_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
	else
		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	int i;

	if ((adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_TONGA) ||
	    (adev->asic_type == CHIP_FIJI))
		vce_v3_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE Instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);

		if (enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_CLOCK_GATING_A, data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(mmVCE_UENC_CLOCK_GATING);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_UENC_CLOCK_GATING, data);
		}

		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

755 | ||
5fc3aeeb | 756 | static int vce_v3_0_set_powergating_state(void *handle, |
757 | enum amd_powergating_state state) | |
aaa36a97 AD |
758 | { |
759 | /* This doesn't actually powergate the VCE block. | |
760 | * That's done in the dpm code via the SMC. This | |
761 | * just re-inits the block as necessary. The actual | |
762 | * gating still happens in the dpm code. We should | |
763 | * revisit this when there is a cleaner line between | |
764 | * the smc and the hw blocks | |
765 | */ | |
5fc3aeeb | 766 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
767 | ||
e3b04bc7 | 768 | if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE)) |
808a934f AD |
769 | return 0; |
770 | ||
5fc3aeeb | 771 | if (state == AMD_PG_STATE_GATE) |
aaa36a97 AD |
772 | /* XXX do we need a vce_v3_0_stop()? */ |
773 | return 0; | |
774 | else | |
775 | return vce_v3_0_start(adev); | |
776 | } | |
777 | ||
ea4a8c1d MSB |
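/*
 * VM-mode ring helpers.  These emit VCE_CMD_* packets directly on the
 * ring (IB submission, page table update/TLB flush, and fence wait)
 * instead of going through the command stream parser used in physical
 * mode.
 */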
static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
			 unsigned int vm_id, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
	return
		5; /* vce_v3_0_ring_emit_ib */
}

static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
	return
		4 + /* vce_v3_0_emit_pipeline_sync */
		6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
}

static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
{
	return
		6 + /* vce_v3_0_emit_vm_flush */
		4 + /* vce_v3_0_emit_pipeline_sync */
		6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
}

const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.check_soft_reset = vce_v3_0_check_soft_reset,
	.pre_soft_reset = vce_v3_0_pre_soft_reset,
	.soft_reset = vce_v3_0_soft_reset,
	.post_soft_reset = vce_v3_0_post_soft_reset,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = NULL,
	.emit_ib = vce_v3_0_ring_emit_ib,
	.emit_vm_flush = vce_v3_0_emit_vm_flush,
	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
};

static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	if (adev->asic_type >= CHIP_STONEY) {
		for (i = 0; i < adev->vce.num_rings; i++)
			adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
		DRM_INFO("VCE enabled in VM mode\n");
	} else {
		for (i = 0; i < adev->vce.num_rings; i++)
			adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
		DRM_INFO("VCE enabled in physical mode\n");
	}
}

static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};

static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}