Commit | Line | Data |
---|---|---|
aaa36a97 AD |
1 | /* |
2 | * Copyright 2014 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * Authors: Christian König <christian.koenig@amd.com> | |
23 | */ | |
24 | ||
25 | #include <linux/firmware.h> | |
47b757fb | 26 | |
aaa36a97 AD |
27 | #include "amdgpu.h" |
28 | #include "amdgpu_uvd.h" | |
29 | #include "vid.h" | |
30 | #include "uvd/uvd_6_0_d.h" | |
31 | #include "uvd/uvd_6_0_sh_mask.h" | |
32 | #include "oss/oss_2_0_d.h" | |
33 | #include "oss/oss_2_0_sh_mask.h" | |
a0cdef9e AD |
34 | #include "smu/smu_7_1_3_d.h" |
35 | #include "smu/smu_7_1_3_sh_mask.h" | |
d5b4e25d | 36 | #include "bif/bif_5_1_d.h" |
0f30a397 | 37 | #include "gmc/gmc_8_1_d.h" |
be3ecca7 | 38 | #include "vi.h" |
091aec0b | 39 | #include "ivsrcid/ivsrcid_vislands30.h" |
aaa36a97 | 40 | |
dead73d7 JZ |
41 | /* Polaris10/11/12 firmware version */ |
42 | #define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8)) | |
43 | ||
aaa36a97 | 44 | static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev); |
c259ee6e JZ |
45 | static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev); |
46 | ||
aaa36a97 AD |
47 | static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev); |
48 | static int uvd_v6_0_start(struct amdgpu_device *adev); | |
49 | static void uvd_v6_0_stop(struct amdgpu_device *adev); | |
be3ecca7 | 50 | static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev); |
f2ba8c3d | 51 | static int uvd_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, |
805b3ba8 RZ |
52 | enum amd_clockgating_state state); |
53 | static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev, | |
54 | bool enable); | |
aaa36a97 | 55 | |
06a7e9cb JZ |
56 | /** |
57 | * uvd_v6_0_enc_support - get encode support status | |
58 | * | |
59 | * @adev: amdgpu_device pointer | |
60 | * | |
61 | * Returns the current hardware encode support status | |
62 | */ | |
63 | static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev) | |
64 | { | |
dead73d7 | 65 | return ((adev->asic_type >= CHIP_POLARIS10) && |
136b10ad | 66 | (adev->asic_type <= CHIP_VEGAM) && |
dead73d7 | 67 | (!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16)); |
06a7e9cb JZ |
68 | } |
69 | ||
aaa36a97 AD |
70 | /** |
71 | * uvd_v6_0_ring_get_rptr - get read pointer | |
72 | * | |
73 | * @ring: amdgpu_ring pointer | |
74 | * | |
75 | * Returns the current hardware read pointer | |
76 | */ | |
536fbf94 | 77 | static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring) |
aaa36a97 AD |
78 | { |
79 | struct amdgpu_device *adev = ring->adev; | |
80 | ||
81 | return RREG32(mmUVD_RBC_RB_RPTR); | |
82 | } | |
83 | ||
c0f2f2e6 JZ |
84 | /** |
85 | * uvd_v6_0_enc_ring_get_rptr - get enc read pointer | |
86 | * | |
87 | * @ring: amdgpu_ring pointer | |
88 | * | |
89 | * Returns the current hardware enc read pointer | |
90 | */ | |
91 | static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring) | |
92 | { | |
93 | struct amdgpu_device *adev = ring->adev; | |
94 | ||
2bb795f5 | 95 | if (ring == &adev->uvd.inst->ring_enc[0]) |
c0f2f2e6 JZ |
96 | return RREG32(mmUVD_RB_RPTR); |
97 | else | |
98 | return RREG32(mmUVD_RB_RPTR2); | |
99 | } | |
aaa36a97 AD |
100 | /** |
101 | * uvd_v6_0_ring_get_wptr - get write pointer | |
102 | * | |
103 | * @ring: amdgpu_ring pointer | |
104 | * | |
105 | * Returns the current hardware write pointer | |
106 | */ | |
536fbf94 | 107 | static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring) |
aaa36a97 AD |
108 | { |
109 | struct amdgpu_device *adev = ring->adev; | |
110 | ||
111 | return RREG32(mmUVD_RBC_RB_WPTR); | |
112 | } | |
113 | ||
c0f2f2e6 JZ |
114 | /** |
115 | * uvd_v6_0_enc_ring_get_wptr - get enc write pointer | |
116 | * | |
117 | * @ring: amdgpu_ring pointer | |
118 | * | |
119 | * Returns the current hardware enc write pointer | |
120 | */ | |
121 | static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring) | |
122 | { | |
123 | struct amdgpu_device *adev = ring->adev; | |
124 | ||
2bb795f5 | 125 | if (ring == &adev->uvd.inst->ring_enc[0]) |
c0f2f2e6 JZ |
126 | return RREG32(mmUVD_RB_WPTR); |
127 | else | |
128 | return RREG32(mmUVD_RB_WPTR2); | |
129 | } | |
130 | ||
aaa36a97 AD |
131 | /** |
132 | * uvd_v6_0_ring_set_wptr - set write pointer | |
133 | * | |
134 | * @ring: amdgpu_ring pointer | |
135 | * | |
136 | * Commits the write pointer to the hardware | |
137 | */ | |
138 | static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring) | |
139 | { | |
140 | struct amdgpu_device *adev = ring->adev; | |
141 | ||
536fbf94 | 142 | WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); |
aaa36a97 AD |
143 | } |
144 | ||
c0f2f2e6 JZ |
145 | /** |
146 | * uvd_v6_0_enc_ring_set_wptr - set enc write pointer | |
147 | * | |
148 | * @ring: amdgpu_ring pointer | |
149 | * | |
150 | * Commits the enc write pointer to the hardware | |
151 | */ | |
152 | static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring) | |
153 | { | |
154 | struct amdgpu_device *adev = ring->adev; | |
155 | ||
2bb795f5 | 156 | if (ring == &adev->uvd.inst->ring_enc[0]) |
c0f2f2e6 JZ |
157 | WREG32(mmUVD_RB_WPTR, |
158 | lower_32_bits(ring->wptr)); | |
159 | else | |
160 | WREG32(mmUVD_RB_WPTR2, | |
161 | lower_32_bits(ring->wptr)); | |
162 | } | |
163 | ||
2a91f272 JZ |
164 | /** |
165 | * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working | |
166 | * | |
167 | * @ring: the engine to test on | |
168 | * | |
169 | */ | |
170 | static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring) | |
171 | { | |
172 | struct amdgpu_device *adev = ring->adev; | |
517b91f4 | 173 | uint32_t rptr; |
2a91f272 JZ |
174 | unsigned i; |
175 | int r; | |
176 | ||
177 | r = amdgpu_ring_alloc(ring, 16); | |
dc9eeff8 | 178 | if (r) |
2a91f272 | 179 | return r; |
517b91f4 S |
180 | |
181 | rptr = amdgpu_ring_get_rptr(ring); | |
182 | ||
2a91f272 JZ |
183 | amdgpu_ring_write(ring, HEVC_ENC_CMD_END); |
184 | amdgpu_ring_commit(ring); | |
185 | ||
186 | for (i = 0; i < adev->usec_timeout; i++) { | |
187 | if (amdgpu_ring_get_rptr(ring) != rptr) | |
188 | break; | |
c366be54 | 189 | udelay(1); |
2a91f272 JZ |
190 | } |
191 | ||
dc9eeff8 | 192 | if (i >= adev->usec_timeout) |
2a91f272 | 193 | r = -ETIMEDOUT; |
2a91f272 JZ |
194 | |
195 | return r; | |
196 | } | |
197 | ||
e0128efb JZ |
198 | /** |
199 | * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg | |
200 | * | |
e0128efb JZ |
201 | * @ring: ring we should submit the msg to |
202 | * @handle: session handle to use | |
166c2089 | 203 | * @bo: amdgpu object for which we query the offset |
e0128efb JZ |
204 | * @fence: optional fence to return |
205 | * | |
206 | * Open up a stream for HW test | |
207 | */ | |
208 | static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | |
ce584a8e | 209 | struct amdgpu_bo *bo, |
e0128efb JZ |
210 | struct dma_fence **fence) |
211 | { | |
212 | const unsigned ib_size_dw = 16; | |
213 | struct amdgpu_job *job; | |
214 | struct amdgpu_ib *ib; | |
215 | struct dma_fence *f = NULL; | |
ce584a8e | 216 | uint64_t addr; |
e0128efb JZ |
217 | int i, r; |
218 | ||
f7d66fb2 CK |
219 | r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, |
220 | AMDGPU_IB_POOL_DIRECT, &job); | |
e0128efb JZ |
221 | if (r) |
222 | return r; | |
223 | ||
224 | ib = &job->ibs[0]; | |
ce584a8e | 225 | addr = amdgpu_bo_gpu_offset(bo); |
e0128efb JZ |
226 | |
227 | ib->length_dw = 0; | |
228 | ib->ptr[ib->length_dw++] = 0x00000018; | |
229 | ib->ptr[ib->length_dw++] = 0x00000001; /* session info */ | |
230 | ib->ptr[ib->length_dw++] = handle; | |
231 | ib->ptr[ib->length_dw++] = 0x00010000; | |
ce584a8e AD |
232 | ib->ptr[ib->length_dw++] = upper_32_bits(addr); |
233 | ib->ptr[ib->length_dw++] = addr; | |
e0128efb JZ |
234 | |
235 | ib->ptr[ib->length_dw++] = 0x00000014; | |
236 | ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ | |
237 | ib->ptr[ib->length_dw++] = 0x0000001c; | |
238 | ib->ptr[ib->length_dw++] = 0x00000001; | |
239 | ib->ptr[ib->length_dw++] = 0x00000000; | |
240 | ||
241 | ib->ptr[ib->length_dw++] = 0x00000008; | |
242 | ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */ | |
243 | ||
244 | for (i = ib->length_dw; i < ib_size_dw; ++i) | |
245 | ib->ptr[i] = 0x0; | |
246 | ||
ee913fd9 | 247 | r = amdgpu_job_submit_direct(job, ring, &f); |
e0128efb JZ |
248 | if (r) |
249 | goto err; | |
250 | ||
e0128efb JZ |
251 | if (fence) |
252 | *fence = dma_fence_get(f); | |
253 | dma_fence_put(f); | |
254 | return 0; | |
255 | ||
256 | err: | |
257 | amdgpu_job_free(job); | |
258 | return r; | |
259 | } | |
260 | ||
261 | /** | |
262 | * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg | |
263 | * | |
e0128efb JZ |
264 | * @ring: ring we should submit the msg to |
265 | * @handle: session handle to use | |
166c2089 | 266 | * @bo: amdgpu object for which we query the offset |
e0128efb JZ |
267 | * @fence: optional fence to return |
268 | * | |
269 | * Close up a stream for HW test or if userspace failed to do so | |
270 | */ | |
f15507a1 CIK |
271 | static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, |
272 | uint32_t handle, | |
ce584a8e | 273 | struct amdgpu_bo *bo, |
ec442fd3 | 274 | struct dma_fence **fence) |
e0128efb JZ |
275 | { |
276 | const unsigned ib_size_dw = 16; | |
277 | struct amdgpu_job *job; | |
278 | struct amdgpu_ib *ib; | |
279 | struct dma_fence *f = NULL; | |
ce584a8e | 280 | uint64_t addr; |
e0128efb JZ |
281 | int i, r; |
282 | ||
f7d66fb2 CK |
283 | r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, |
284 | AMDGPU_IB_POOL_DIRECT, &job); | |
e0128efb JZ |
285 | if (r) |
286 | return r; | |
287 | ||
288 | ib = &job->ibs[0]; | |
ce584a8e | 289 | addr = amdgpu_bo_gpu_offset(bo); |
e0128efb JZ |
290 | |
291 | ib->length_dw = 0; | |
292 | ib->ptr[ib->length_dw++] = 0x00000018; | |
293 | ib->ptr[ib->length_dw++] = 0x00000001; /* session info */ | |
294 | ib->ptr[ib->length_dw++] = handle; | |
295 | ib->ptr[ib->length_dw++] = 0x00010000; | |
ce584a8e AD |
296 | ib->ptr[ib->length_dw++] = upper_32_bits(addr); |
297 | ib->ptr[ib->length_dw++] = addr; | |
e0128efb JZ |
298 | |
299 | ib->ptr[ib->length_dw++] = 0x00000014; | |
300 | ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ | |
301 | ib->ptr[ib->length_dw++] = 0x0000001c; | |
302 | ib->ptr[ib->length_dw++] = 0x00000001; | |
303 | ib->ptr[ib->length_dw++] = 0x00000000; | |
304 | ||
305 | ib->ptr[ib->length_dw++] = 0x00000008; | |
306 | ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */ | |
307 | ||
308 | for (i = ib->length_dw; i < ib_size_dw; ++i) | |
309 | ib->ptr[i] = 0x0; | |
310 | ||
ec442fd3 | 311 | r = amdgpu_job_submit_direct(job, ring, &f); |
ee913fd9 CK |
312 | if (r) |
313 | goto err; | |
e0128efb JZ |
314 | |
315 | if (fence) | |
316 | *fence = dma_fence_get(f); | |
317 | dma_fence_put(f); | |
318 | return 0; | |
319 | ||
320 | err: | |
321 | amdgpu_job_free(job); | |
322 | return r; | |
323 | } | |
324 | ||
325 | /** | |
326 | * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working | |
327 | * | |
328 | * @ring: the engine to test on | |
166c2089 | 329 | * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT |
e0128efb JZ |
330 | * |
331 | */ | |
332 | static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |
333 | { | |
334 | struct dma_fence *fence = NULL; | |
68331d7c | 335 | struct amdgpu_bo *bo = ring->adev->uvd.ib_bo; |
e0128efb JZ |
336 | long r; |
337 | ||
ce584a8e | 338 | r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL); |
98079389 | 339 | if (r) |
e0128efb | 340 | goto error; |
e0128efb | 341 | |
ce584a8e | 342 | r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence); |
98079389 | 343 | if (r) |
e0128efb | 344 | goto error; |
e0128efb JZ |
345 | |
346 | r = dma_fence_wait_timeout(fence, false, timeout); | |
98079389 | 347 | if (r == 0) |
e0128efb | 348 | r = -ETIMEDOUT; |
98079389 | 349 | else if (r > 0) |
e0128efb | 350 | r = 0; |
98079389 | 351 | |
e0128efb JZ |
352 | error: |
353 | dma_fence_put(fence); | |
354 | return r; | |
355 | } | |
98079389 | 356 | |
146b085e | 357 | static int uvd_v6_0_early_init(struct amdgpu_ip_block *ip_block) |
aaa36a97 | 358 | { |
146b085e | 359 | struct amdgpu_device *adev = ip_block->adev; |
2bb795f5 | 360 | adev->uvd.num_uvd_inst = 1; |
5fc3aeeb | 361 | |
cb4b02d7 LL |
362 | if (!(adev->flags & AMD_IS_APU) && |
363 | (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK)) | |
364 | return -ENOENT; | |
365 | ||
aaa36a97 | 366 | uvd_v6_0_set_ring_funcs(adev); |
06a7e9cb JZ |
367 | |
368 | if (uvd_v6_0_enc_support(adev)) { | |
369 | adev->uvd.num_enc_rings = 2; | |
c259ee6e | 370 | uvd_v6_0_set_enc_ring_funcs(adev); |
06a7e9cb JZ |
371 | } |
372 | ||
aaa36a97 AD |
373 | uvd_v6_0_set_irq_funcs(adev); |
374 | ||
375 | return 0; | |
376 | } | |
377 | ||
d5347e8d | 378 | static int uvd_v6_0_sw_init(struct amdgpu_ip_block *ip_block) |
aaa36a97 AD |
379 | { |
380 | struct amdgpu_ring *ring; | |
06a7e9cb | 381 | int i, r; |
d5347e8d | 382 | struct amdgpu_device *adev = ip_block->adev; |
aaa36a97 AD |
383 | |
384 | /* UVD TRAP */ | |
1ffdeca6 | 385 | r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq); |
aaa36a97 AD |
386 | if (r) |
387 | return r; | |
388 | ||
65da0d40 JZ |
389 | /* UVD ENC TRAP */ |
390 | if (uvd_v6_0_enc_support(adev)) { | |
391 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { | |
1ffdeca6 | 392 | r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq); |
65da0d40 JZ |
393 | if (r) |
394 | return r; | |
395 | } | |
396 | } | |
397 | ||
aaa36a97 AD |
398 | r = amdgpu_uvd_sw_init(adev); |
399 | if (r) | |
400 | return r; | |
401 | ||
dead73d7 JZ |
402 | if (!uvd_v6_0_enc_support(adev)) { |
403 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) | |
2bb795f5 | 404 | adev->uvd.inst->ring_enc[i].funcs = NULL; |
dead73d7 | 405 | |
2bb795f5 | 406 | adev->uvd.inst->irq.num_types = 1; |
dead73d7 JZ |
407 | adev->uvd.num_enc_rings = 0; |
408 | ||
409 | DRM_INFO("UVD ENC is disabled\n"); | |
296191c5 JZ |
410 | } |
411 | ||
2bb795f5 | 412 | ring = &adev->uvd.inst->ring; |
aaa36a97 | 413 | sprintf(ring->name, "uvd"); |
1c6d567b | 414 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0, |
c107171b | 415 | AMDGPU_RING_PRIO_DEFAULT, NULL); |
06a7e9cb JZ |
416 | if (r) |
417 | return r; | |
418 | ||
3b34c14f CW |
419 | r = amdgpu_uvd_resume(adev); |
420 | if (r) | |
421 | return r; | |
422 | ||
06a7e9cb JZ |
423 | if (uvd_v6_0_enc_support(adev)) { |
424 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { | |
2bb795f5 | 425 | ring = &adev->uvd.inst->ring_enc[i]; |
06a7e9cb | 426 | sprintf(ring->name, "uvd_enc%d", i); |
1c6d567b ND |
427 | r = amdgpu_ring_init(adev, ring, 512, |
428 | &adev->uvd.inst->irq, 0, | |
c107171b | 429 | AMDGPU_RING_PRIO_DEFAULT, NULL); |
06a7e9cb JZ |
430 | if (r) |
431 | return r; | |
432 | } | |
433 | } | |
aaa36a97 AD |
434 | |
435 | return r; | |
436 | } | |
437 | ||
36aa9ab9 | 438 | static int uvd_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) |
aaa36a97 | 439 | { |
06a7e9cb | 440 | int i, r; |
36aa9ab9 | 441 | struct amdgpu_device *adev = ip_block->adev; |
aaa36a97 AD |
442 | |
443 | r = amdgpu_uvd_suspend(adev); | |
444 | if (r) | |
445 | return r; | |
446 | ||
06a7e9cb JZ |
447 | if (uvd_v6_0_enc_support(adev)) { |
448 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) | |
2bb795f5 | 449 | amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); |
06a7e9cb JZ |
450 | } |
451 | ||
50237287 | 452 | return amdgpu_uvd_sw_fini(adev); |
aaa36a97 AD |
453 | } |
454 | ||
455 | /** | |
456 | * uvd_v6_0_hw_init - start and test UVD block | |
457 | * | |
7e6487ab | 458 | * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. |
aaa36a97 AD |
459 | * |
460 | * Initialize the hardware, boot up the VCPU and do some testing | |
461 | */ | |
58608034 | 462 | static int uvd_v6_0_hw_init(struct amdgpu_ip_block *ip_block) |
aaa36a97 | 463 | { |
58608034 | 464 | struct amdgpu_device *adev = ip_block->adev; |
2bb795f5 | 465 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
aaa36a97 | 466 | uint32_t tmp; |
2a91f272 | 467 | int i, r; |
aaa36a97 | 468 | |
e3e672e6 | 469 | amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); |
f2ba8c3d | 470 | uvd_v6_0_set_clockgating_state(ip_block, AMD_CG_STATE_UNGATE); |
e3e672e6 | 471 | uvd_v6_0_enable_mgcg(adev, true); |
aaa36a97 | 472 | |
c66ed765 AG |
473 | r = amdgpu_ring_test_helper(ring); |
474 | if (r) | |
aaa36a97 | 475 | goto done; |
aaa36a97 | 476 | |
a27de35c | 477 | r = amdgpu_ring_alloc(ring, 10); |
aaa36a97 AD |
478 | if (r) { |
479 | DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); | |
480 | goto done; | |
481 | } | |
482 | ||
483 | tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0); | |
484 | amdgpu_ring_write(ring, tmp); | |
485 | amdgpu_ring_write(ring, 0xFFFFF); | |
486 | ||
487 | tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0); | |
488 | amdgpu_ring_write(ring, tmp); | |
489 | amdgpu_ring_write(ring, 0xFFFFF); | |
490 | ||
491 | tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0); | |
492 | amdgpu_ring_write(ring, tmp); | |
493 | amdgpu_ring_write(ring, 0xFFFFF); | |
494 | ||
495 | /* Clear timeout status bits */ | |
496 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0)); | |
497 | amdgpu_ring_write(ring, 0x8); | |
498 | ||
499 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); | |
500 | amdgpu_ring_write(ring, 3); | |
501 | ||
a27de35c | 502 | amdgpu_ring_commit(ring); |
aaa36a97 | 503 | |
2a91f272 JZ |
504 | if (uvd_v6_0_enc_support(adev)) { |
505 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { | |
2bb795f5 | 506 | ring = &adev->uvd.inst->ring_enc[i]; |
c66ed765 AG |
507 | r = amdgpu_ring_test_helper(ring); |
508 | if (r) | |
2a91f272 | 509 | goto done; |
2a91f272 JZ |
510 | } |
511 | } | |
512 | ||
aaa36a97 | 513 | done: |
c259ee6e JZ |
514 | if (!r) { |
515 | if (uvd_v6_0_enc_support(adev)) | |
516 | DRM_INFO("UVD and UVD ENC initialized successfully.\n"); | |
517 | else | |
518 | DRM_INFO("UVD initialized successfully.\n"); | |
519 | } | |
aaa36a97 AD |
520 | |
521 | return r; | |
522 | } | |
523 | ||
524 | /** | |
525 | * uvd_v6_0_hw_fini - stop the hardware block | |
526 | * | |
7e6487ab | 527 | * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. |
aaa36a97 AD |
528 | * |
529 | * Stop the UVD block, mark ring as not ready any more | |
530 | */ | |
692d2cd1 | 531 | static int uvd_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) |
aaa36a97 | 532 | { |
692d2cd1 | 533 | struct amdgpu_device *adev = ip_block->adev; |
aaa36a97 | 534 | |
4fc30ea7 EQ |
535 | cancel_delayed_work_sync(&adev->uvd.idle_work); |
536 | ||
537 | if (RREG32(mmUVD_STATUS) != 0) | |
538 | uvd_v6_0_stop(adev); | |
539 | ||
540 | return 0; | |
541 | } | |
542 | ||
94b2e07a | 543 | static int uvd_v6_0_prepare_suspend(struct amdgpu_ip_block *ip_block) |
db998890 | 544 | { |
94b2e07a | 545 | struct amdgpu_device *adev = ip_block->adev; |
db998890 ML |
546 | |
547 | return amdgpu_uvd_prepare_suspend(adev); | |
548 | } | |
549 | ||
982d7f9b | 550 | static int uvd_v6_0_suspend(struct amdgpu_ip_block *ip_block) |
4fc30ea7 EQ |
551 | { |
552 | int r; | |
982d7f9b | 553 | struct amdgpu_device *adev = ip_block->adev; |
4fc30ea7 | 554 | |
bf756fb8 EQ |
555 | /* |
556 | * Proper cleanups before halting the HW engine: | |
557 | * - cancel the delayed idle work | |
558 | * - enable powergating | |
559 | * - enable clockgating | |
560 | * - disable dpm | |
561 | * | |
562 | * TODO: to align with the VCN implementation, move the | |
563 | * jobs for clockgating/powergating/dpm setting to | |
564 | * ->set_powergating_state(). | |
565 | */ | |
566 | cancel_delayed_work_sync(&adev->uvd.idle_work); | |
567 | ||
568 | if (adev->pm.dpm_enabled) { | |
569 | amdgpu_dpm_enable_uvd(adev, false); | |
570 | } else { | |
571 | amdgpu_asic_set_uvd_clocks(adev, 0, 0); | |
572 | /* shutdown the UVD block */ | |
573 | amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, | |
574 | AMD_PG_STATE_GATE); | |
575 | amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, | |
576 | AMD_CG_STATE_GATE); | |
577 | } | |
578 | ||
692d2cd1 | 579 | r = uvd_v6_0_hw_fini(ip_block); |
3f99dd81 LL |
580 | if (r) |
581 | return r; | |
582 | ||
d3daa2c7 | 583 | return amdgpu_uvd_suspend(adev); |
aaa36a97 AD |
584 | } |
585 | ||
7feb4f3a | 586 | static int uvd_v6_0_resume(struct amdgpu_ip_block *ip_block) |
aaa36a97 AD |
587 | { |
588 | int r; | |
589 | ||
58608034 | 590 | r = amdgpu_uvd_resume(ip_block->adev); |
d3daa2c7 TSD |
591 | if (r) |
592 | return r; | |
593 | ||
58608034 | 594 | return uvd_v6_0_hw_init(ip_block); |
aaa36a97 AD |
595 | } |
596 | ||
597 | /** | |
598 | * uvd_v6_0_mc_resume - memory controller programming | |
599 | * | |
600 | * @adev: amdgpu_device pointer | |
601 | * | |
602 | * Let the UVD memory controller know it's offsets | |
603 | */ | |
604 | static void uvd_v6_0_mc_resume(struct amdgpu_device *adev) | |
605 | { | |
606 | uint64_t offset; | |
607 | uint32_t size; | |
608 | ||
f349f772 | 609 | /* program memory controller bits 0-27 */ |
aaa36a97 | 610 | WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, |
2bb795f5 | 611 | lower_32_bits(adev->uvd.inst->gpu_addr)); |
aaa36a97 | 612 | WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, |
2bb795f5 | 613 | upper_32_bits(adev->uvd.inst->gpu_addr)); |
aaa36a97 AD |
614 | |
615 | offset = AMDGPU_UVD_FIRMWARE_OFFSET; | |
c1fe75c9 | 616 | size = AMDGPU_UVD_FIRMWARE_SIZE(adev); |
aaa36a97 AD |
617 | WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3); |
618 | WREG32(mmUVD_VCPU_CACHE_SIZE0, size); | |
619 | ||
620 | offset += size; | |
c0365541 | 621 | size = AMDGPU_UVD_HEAP_SIZE; |
aaa36a97 AD |
622 | WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3); |
623 | WREG32(mmUVD_VCPU_CACHE_SIZE1, size); | |
624 | ||
625 | offset += size; | |
c0365541 AN |
626 | size = AMDGPU_UVD_STACK_SIZE + |
627 | (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles); | |
aaa36a97 AD |
628 | WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3); |
629 | WREG32(mmUVD_VCPU_CACHE_SIZE2, size); | |
549300ce AD |
630 | |
631 | WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); | |
632 | WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); | |
633 | WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); | |
c0365541 AN |
634 | |
635 | WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles); | |
aaa36a97 AD |
636 | } |
637 | ||
be3ecca7 | 638 | #if 0 |
9b08a306 EH |
639 | static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev, |
640 | bool enable) | |
641 | { | |
642 | u32 data, data1; | |
643 | ||
644 | data = RREG32(mmUVD_CGC_GATE); | |
645 | data1 = RREG32(mmUVD_SUVD_CGC_GATE); | |
646 | if (enable) { | |
647 | data |= UVD_CGC_GATE__SYS_MASK | | |
648 | UVD_CGC_GATE__UDEC_MASK | | |
649 | UVD_CGC_GATE__MPEG2_MASK | | |
650 | UVD_CGC_GATE__RBC_MASK | | |
651 | UVD_CGC_GATE__LMI_MC_MASK | | |
652 | UVD_CGC_GATE__IDCT_MASK | | |
653 | UVD_CGC_GATE__MPRD_MASK | | |
654 | UVD_CGC_GATE__MPC_MASK | | |
655 | UVD_CGC_GATE__LBSI_MASK | | |
656 | UVD_CGC_GATE__LRBBM_MASK | | |
657 | UVD_CGC_GATE__UDEC_RE_MASK | | |
658 | UVD_CGC_GATE__UDEC_CM_MASK | | |
659 | UVD_CGC_GATE__UDEC_IT_MASK | | |
660 | UVD_CGC_GATE__UDEC_DB_MASK | | |
661 | UVD_CGC_GATE__UDEC_MP_MASK | | |
662 | UVD_CGC_GATE__WCB_MASK | | |
663 | UVD_CGC_GATE__VCPU_MASK | | |
664 | UVD_CGC_GATE__SCPU_MASK; | |
665 | data1 |= UVD_SUVD_CGC_GATE__SRE_MASK | | |
666 | UVD_SUVD_CGC_GATE__SIT_MASK | | |
667 | UVD_SUVD_CGC_GATE__SMP_MASK | | |
668 | UVD_SUVD_CGC_GATE__SCM_MASK | | |
669 | UVD_SUVD_CGC_GATE__SDB_MASK | | |
670 | UVD_SUVD_CGC_GATE__SRE_H264_MASK | | |
671 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK | | |
672 | UVD_SUVD_CGC_GATE__SIT_H264_MASK | | |
673 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK | | |
674 | UVD_SUVD_CGC_GATE__SCM_H264_MASK | | |
675 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK | | |
676 | UVD_SUVD_CGC_GATE__SDB_H264_MASK | | |
677 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK; | |
678 | } else { | |
679 | data &= ~(UVD_CGC_GATE__SYS_MASK | | |
680 | UVD_CGC_GATE__UDEC_MASK | | |
681 | UVD_CGC_GATE__MPEG2_MASK | | |
682 | UVD_CGC_GATE__RBC_MASK | | |
683 | UVD_CGC_GATE__LMI_MC_MASK | | |
684 | UVD_CGC_GATE__LMI_UMC_MASK | | |
685 | UVD_CGC_GATE__IDCT_MASK | | |
686 | UVD_CGC_GATE__MPRD_MASK | | |
687 | UVD_CGC_GATE__MPC_MASK | | |
688 | UVD_CGC_GATE__LBSI_MASK | | |
689 | UVD_CGC_GATE__LRBBM_MASK | | |
690 | UVD_CGC_GATE__UDEC_RE_MASK | | |
691 | UVD_CGC_GATE__UDEC_CM_MASK | | |
692 | UVD_CGC_GATE__UDEC_IT_MASK | | |
693 | UVD_CGC_GATE__UDEC_DB_MASK | | |
694 | UVD_CGC_GATE__UDEC_MP_MASK | | |
695 | UVD_CGC_GATE__WCB_MASK | | |
696 | UVD_CGC_GATE__VCPU_MASK | | |
697 | UVD_CGC_GATE__SCPU_MASK); | |
698 | data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK | | |
699 | UVD_SUVD_CGC_GATE__SIT_MASK | | |
700 | UVD_SUVD_CGC_GATE__SMP_MASK | | |
701 | UVD_SUVD_CGC_GATE__SCM_MASK | | |
702 | UVD_SUVD_CGC_GATE__SDB_MASK | | |
703 | UVD_SUVD_CGC_GATE__SRE_H264_MASK | | |
704 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK | | |
705 | UVD_SUVD_CGC_GATE__SIT_H264_MASK | | |
706 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK | | |
707 | UVD_SUVD_CGC_GATE__SCM_H264_MASK | | |
708 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK | | |
709 | UVD_SUVD_CGC_GATE__SDB_H264_MASK | | |
710 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK); | |
711 | } | |
712 | WREG32(mmUVD_CGC_GATE, data); | |
713 | WREG32(mmUVD_SUVD_CGC_GATE, data1); | |
714 | } | |
be3ecca7 | 715 | #endif |
9b08a306 | 716 | |
aaa36a97 AD |
717 | /** |
718 | * uvd_v6_0_start - start UVD block | |
719 | * | |
720 | * @adev: amdgpu_device pointer | |
721 | * | |
722 | * Setup and start the UVD block | |
723 | */ | |
724 | static int uvd_v6_0_start(struct amdgpu_device *adev) | |
725 | { | |
2bb795f5 | 726 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
aaa36a97 AD |
727 | uint32_t rb_bufsz, tmp; |
728 | uint32_t lmi_swap_cntl; | |
729 | uint32_t mp_swap_cntl; | |
730 | int i, j, r; | |
731 | ||
f78c3422 TSD |
732 | /* disable DPG */ |
733 | WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); | |
aaa36a97 AD |
734 | |
735 | /* disable byte swapping */ | |
736 | lmi_swap_cntl = 0; | |
737 | mp_swap_cntl = 0; | |
738 | ||
739 | uvd_v6_0_mc_resume(adev); | |
740 | ||
aaa36a97 | 741 | /* disable interupt */ |
f4a7f127 | 742 | WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0); |
aaa36a97 AD |
743 | |
744 | /* stall UMC and register bus before resetting VCPU */ | |
f4a7f127 | 745 | WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1); |
aaa36a97 AD |
746 | mdelay(1); |
747 | ||
748 | /* put LMI, VCPU, RBC etc... into reset */ | |
f78c3422 TSD |
749 | WREG32(mmUVD_SOFT_RESET, |
750 | UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | | |
751 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | | |
752 | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | | |
753 | UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | | |
754 | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | | |
755 | UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | | |
756 | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | | |
aaa36a97 AD |
757 | UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); |
758 | mdelay(5); | |
759 | ||
760 | /* take UVD block out of reset */ | |
f4a7f127 | 761 | WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0); |
aaa36a97 AD |
762 | mdelay(5); |
763 | ||
764 | /* initialize UVD memory controller */ | |
f78c3422 TSD |
765 | WREG32(mmUVD_LMI_CTRL, |
766 | (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | | |
767 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | | |
768 | UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | | |
769 | UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | | |
770 | UVD_LMI_CTRL__REQ_MODE_MASK | | |
771 | UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK); | |
aaa36a97 AD |
772 | |
773 | #ifdef __BIG_ENDIAN | |
774 | /* swap (8 in 32) RB and IB */ | |
775 | lmi_swap_cntl = 0xa; | |
776 | mp_swap_cntl = 0; | |
777 | #endif | |
778 | WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); | |
779 | WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl); | |
780 | ||
781 | WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040); | |
782 | WREG32(mmUVD_MPC_SET_MUXA1, 0x0); | |
783 | WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040); | |
784 | WREG32(mmUVD_MPC_SET_MUXB1, 0x0); | |
785 | WREG32(mmUVD_MPC_SET_ALU, 0); | |
786 | WREG32(mmUVD_MPC_SET_MUX, 0x88); | |
787 | ||
788 | /* take all subblocks out of reset, except VCPU */ | |
789 | WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); | |
790 | mdelay(5); | |
791 | ||
792 | /* enable VCPU clock */ | |
f78c3422 | 793 | WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK); |
aaa36a97 AD |
794 | |
795 | /* enable UMC */ | |
f4a7f127 | 796 | WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0); |
aaa36a97 AD |
797 | |
798 | /* boot up the VCPU */ | |
799 | WREG32(mmUVD_SOFT_RESET, 0); | |
800 | mdelay(10); | |
801 | ||
802 | for (i = 0; i < 10; ++i) { | |
803 | uint32_t status; | |
804 | ||
805 | for (j = 0; j < 100; ++j) { | |
806 | status = RREG32(mmUVD_STATUS); | |
807 | if (status & 2) | |
808 | break; | |
809 | mdelay(10); | |
810 | } | |
811 | r = 0; | |
812 | if (status & 2) | |
813 | break; | |
814 | ||
815 | DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); | |
f4a7f127 | 816 | WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1); |
aaa36a97 | 817 | mdelay(10); |
f4a7f127 | 818 | WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0); |
aaa36a97 AD |
819 | mdelay(10); |
820 | r = -1; | |
821 | } | |
822 | ||
823 | if (r) { | |
824 | DRM_ERROR("UVD not responding, giving up!!!\n"); | |
825 | return r; | |
826 | } | |
827 | /* enable master interrupt */ | |
f78c3422 TSD |
828 | WREG32_P(mmUVD_MASTINT_EN, |
829 | (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), | |
830 | ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); | |
aaa36a97 AD |
831 | |
832 | /* clear the bit 4 of UVD_STATUS */ | |
f78c3422 | 833 | WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); |
aaa36a97 | 834 | |
f4a7f127 | 835 | /* force RBC into idle state */ |
aaa36a97 | 836 | rb_bufsz = order_base_2(ring->ring_size); |
f4a7f127 | 837 | tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); |
aaa36a97 AD |
838 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); |
839 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); | |
840 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); | |
841 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); | |
842 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); | |
aaa36a97 AD |
843 | WREG32(mmUVD_RBC_RB_CNTL, tmp); |
844 | ||
845 | /* set the write pointer delay */ | |
846 | WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0); | |
847 | ||
848 | /* set the wb address */ | |
849 | WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2)); | |
850 | ||
f349f772 | 851 | /* program the RB_BASE for ring buffer */ |
aaa36a97 AD |
852 | WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, |
853 | lower_32_bits(ring->gpu_addr)); | |
854 | WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, | |
855 | upper_32_bits(ring->gpu_addr)); | |
856 | ||
857 | /* Initialize the ring buffer's read and write pointers */ | |
858 | WREG32(mmUVD_RBC_RB_RPTR, 0); | |
859 | ||
860 | ring->wptr = RREG32(mmUVD_RBC_RB_RPTR); | |
536fbf94 | 861 | WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); |
aaa36a97 | 862 | |
f4a7f127 | 863 | WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0); |
aaa36a97 | 864 | |
06a7e9cb | 865 | if (uvd_v6_0_enc_support(adev)) { |
2bb795f5 | 866 | ring = &adev->uvd.inst->ring_enc[0]; |
06a7e9cb JZ |
867 | WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); |
868 | WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); | |
869 | WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr); | |
870 | WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); | |
871 | WREG32(mmUVD_RB_SIZE, ring->ring_size / 4); | |
872 | ||
2bb795f5 | 873 | ring = &adev->uvd.inst->ring_enc[1]; |
06a7e9cb JZ |
874 | WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); |
875 | WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); | |
876 | WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr); | |
877 | WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); | |
878 | WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4); | |
879 | } | |
880 | ||
aaa36a97 AD |
881 | return 0; |
882 | } | |
883 | ||
884 | /** | |
885 | * uvd_v6_0_stop - stop UVD block | |
886 | * | |
887 | * @adev: amdgpu_device pointer | |
888 | * | |
889 | * stop the UVD block | |
890 | */ | |
891 | static void uvd_v6_0_stop(struct amdgpu_device *adev) | |
892 | { | |
893 | /* force RBC into idle state */ | |
894 | WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); | |
895 | ||
896 | /* Stall UMC and register bus before resetting VCPU */ | |
897 | WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | |
898 | mdelay(1); | |
899 | ||
900 | /* put VCPU into reset */ | |
901 | WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); | |
902 | mdelay(5); | |
903 | ||
904 | /* disable VCPU clock */ | |
905 | WREG32(mmUVD_VCPU_CNTL, 0x0); | |
906 | ||
907 | /* Unstall UMC and register bus */ | |
908 | WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); | |
e3e672e6 RZ |
909 | |
910 | WREG32(mmUVD_STATUS, 0); | |
aaa36a97 AD |
911 | } |
912 | ||
913 | /** | |
914 | * uvd_v6_0_ring_emit_fence - emit an fence & trap command | |
915 | * | |
916 | * @ring: amdgpu_ring pointer | |
166c2089 LJ |
917 | * @addr: address |
918 | * @seq: sequence number | |
919 | * @flags: fence related flags | |
aaa36a97 AD |
920 | * |
921 | * Write a fence and a trap command to the ring. | |
922 | */ | |
923 | static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, | |
890ee23f | 924 | unsigned flags) |
aaa36a97 | 925 | { |
890ee23f | 926 | WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); |
aaa36a97 AD |
927 | |
928 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); | |
929 | amdgpu_ring_write(ring, seq); | |
930 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); | |
931 | amdgpu_ring_write(ring, addr & 0xffffffff); | |
932 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); | |
933 | amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff); | |
934 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); | |
935 | amdgpu_ring_write(ring, 0); | |
936 | ||
937 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); | |
938 | amdgpu_ring_write(ring, 0); | |
939 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); | |
940 | amdgpu_ring_write(ring, 0); | |
941 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); | |
942 | amdgpu_ring_write(ring, 2); | |
943 | } | |
944 | ||
c0f2f2e6 JZ |
945 | /** |
946 | * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command | |
947 | * | |
948 | * @ring: amdgpu_ring pointer | |
166c2089 LJ |
949 | * @addr: address |
950 | * @seq: sequence number | |
951 | * @flags: fence related flags | |
c0f2f2e6 JZ |
952 | * |
953 | * Write enc a fence and a trap command to the ring. | |
954 | */ | |
955 | static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, | |
956 | u64 seq, unsigned flags) | |
957 | { | |
958 | WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); | |
959 | ||
960 | amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE); | |
961 | amdgpu_ring_write(ring, addr); | |
962 | amdgpu_ring_write(ring, upper_32_bits(addr)); | |
963 | amdgpu_ring_write(ring, seq); | |
964 | amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP); | |
965 | } | |
966 | ||
996cab95 CK |
967 | /** |
968 | * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing | |
969 | * | |
970 | * @ring: amdgpu_ring pointer | |
971 | */ | |
972 | static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) | |
973 | { | |
974 | /* The firmware doesn't seem to like touching registers at this point. */ | |
975 | } | |
976 | ||
aaa36a97 AD |
977 | /** |
978 | * uvd_v6_0_ring_test_ring - register write test | |
979 | * | |
980 | * @ring: amdgpu_ring pointer | |
981 | * | |
982 | * Test if we can successfully write to the context register | |
983 | */ | |
984 | static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) | |
985 | { | |
986 | struct amdgpu_device *adev = ring->adev; | |
987 | uint32_t tmp = 0; | |
988 | unsigned i; | |
989 | int r; | |
990 | ||
991 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); | |
a27de35c | 992 | r = amdgpu_ring_alloc(ring, 3); |
725b2611 | 993 | if (r) |
aaa36a97 | 994 | return r; |
725b2611 | 995 | |
aaa36a97 AD |
996 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); |
997 | amdgpu_ring_write(ring, 0xDEADBEEF); | |
a27de35c | 998 | amdgpu_ring_commit(ring); |
aaa36a97 AD |
999 | for (i = 0; i < adev->usec_timeout; i++) { |
1000 | tmp = RREG32(mmUVD_CONTEXT_ID); | |
1001 | if (tmp == 0xDEADBEEF) | |
1002 | break; | |
c366be54 | 1003 | udelay(1); |
aaa36a97 AD |
1004 | } |
1005 | ||
725b2611 CK |
1006 | if (i >= adev->usec_timeout) |
1007 | r = -ETIMEDOUT; | |
1008 | ||
aaa36a97 AD |
1009 | return r; |
1010 | } | |
1011 | ||
1012 | /** | |
1013 | * uvd_v6_0_ring_emit_ib - execute indirect buffer | |
1014 | * | |
1015 | * @ring: amdgpu_ring pointer | |
166c2089 | 1016 | * @job: job to retrieve vmid from |
aaa36a97 | 1017 | * @ib: indirect buffer to execute |
166c2089 | 1018 | * @flags: unused |
aaa36a97 AD |
1019 | * |
1020 | * Write ring commands to execute the indirect buffer | |
1021 | */ | |
1022 | static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, | |
34955e03 | 1023 | struct amdgpu_job *job, |
d88bf583 | 1024 | struct amdgpu_ib *ib, |
c4c905ec | 1025 | uint32_t flags) |
aaa36a97 | 1026 | { |
34955e03 RZ |
1027 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); |
1028 | ||
0f30a397 | 1029 | amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0)); |
c4f46f22 | 1030 | amdgpu_ring_write(ring, vmid); |
0f30a397 | 1031 | |
aaa36a97 AD |
1032 | amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); |
1033 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); | |
1034 | amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0)); | |
1035 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); | |
1036 | amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0)); | |
1037 | amdgpu_ring_write(ring, ib->length_dw); | |
1038 | } | |
1039 | ||
c0f2f2e6 JZ |
1040 | /** |
1041 | * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer | |
1042 | * | |
1043 | * @ring: amdgpu_ring pointer | |
166c2089 | 1044 | * @job: job to retrive vmid from |
c0f2f2e6 | 1045 | * @ib: indirect buffer to execute |
166c2089 | 1046 | * @flags: unused |
c0f2f2e6 JZ |
1047 | * |
1048 | * Write enc ring commands to execute the indirect buffer | |
1049 | */ | |
1050 | static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring, | |
34955e03 RZ |
1051 | struct amdgpu_job *job, |
1052 | struct amdgpu_ib *ib, | |
c4c905ec | 1053 | uint32_t flags) |
c0f2f2e6 | 1054 | { |
34955e03 RZ |
1055 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); |
1056 | ||
c0f2f2e6 | 1057 | amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM); |
c4f46f22 | 1058 | amdgpu_ring_write(ring, vmid); |
c0f2f2e6 JZ |
1059 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); |
1060 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); | |
1061 | amdgpu_ring_write(ring, ib->length_dw); | |
1062 | } | |
1063 | ||
25299898 CK |
1064 | static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring, |
1065 | uint32_t reg, uint32_t val) | |
1066 | { | |
1067 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); | |
1068 | amdgpu_ring_write(ring, reg << 2); | |
1069 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); | |
1070 | amdgpu_ring_write(ring, val); | |
1071 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); | |
1072 | amdgpu_ring_write(ring, 0x8); | |
1073 | } | |
1074 | ||
0f30a397 | 1075 | static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, |
c633c00b | 1076 | unsigned vmid, uint64_t pd_addr) |
0f30a397 | 1077 | { |
c633c00b | 1078 | amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); |
0f30a397 CK |
1079 | |
1080 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); | |
1081 | amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); | |
1082 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); | |
1083 | amdgpu_ring_write(ring, 0); | |
1084 | amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0)); | |
c4f46f22 | 1085 | amdgpu_ring_write(ring, 1 << vmid); /* mask */ |
0f30a397 CK |
1086 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); |
1087 | amdgpu_ring_write(ring, 0xC); | |
1088 | } | |
1089 | ||
1090 | static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | |
1091 | { | |
1092 | uint32_t seq = ring->fence_drv.sync_seq; | |
1093 | uint64_t addr = ring->fence_drv.gpu_addr; | |
1094 | ||
1095 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); | |
1096 | amdgpu_ring_write(ring, lower_32_bits(addr)); | |
1097 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); | |
1098 | amdgpu_ring_write(ring, upper_32_bits(addr)); | |
1099 | amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0)); | |
1100 | amdgpu_ring_write(ring, 0xffffffff); /* mask */ | |
1101 | amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0)); | |
1102 | amdgpu_ring_write(ring, seq); | |
1103 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); | |
1104 | amdgpu_ring_write(ring, 0xE); | |
1105 | } | |
1106 | ||
1aac3c91 LL |
1107 | static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) |
1108 | { | |
1109 | int i; | |
1110 | ||
1111 | WARN_ON(ring->wptr % 2 || count % 2); | |
1112 | ||
1113 | for (i = 0; i < count / 2; i++) { | |
1114 | amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0)); | |
1115 | amdgpu_ring_write(ring, 0); | |
1116 | } | |
1117 | } | |
1118 | ||
c0f2f2e6 JZ |
1119 | static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring) |
1120 | { | |
1121 | uint32_t seq = ring->fence_drv.sync_seq; | |
1122 | uint64_t addr = ring->fence_drv.gpu_addr; | |
1123 | ||
1124 | amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE); | |
1125 | amdgpu_ring_write(ring, lower_32_bits(addr)); | |
1126 | amdgpu_ring_write(ring, upper_32_bits(addr)); | |
1127 | amdgpu_ring_write(ring, seq); | |
1128 | } | |
1129 | ||
1130 | static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring) | |
1131 | { | |
1132 | amdgpu_ring_write(ring, HEVC_ENC_CMD_END); | |
1133 | } | |
1134 | ||
1135 | static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, | |
c633c00b | 1136 | unsigned int vmid, uint64_t pd_addr) |
c0f2f2e6 JZ |
1137 | { |
1138 | amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB); | |
c4f46f22 | 1139 | amdgpu_ring_write(ring, vmid); |
c0f2f2e6 JZ |
1140 | amdgpu_ring_write(ring, pd_addr >> 12); |
1141 | ||
1142 | amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB); | |
c4f46f22 | 1143 | amdgpu_ring_write(ring, vmid); |
c0f2f2e6 JZ |
1144 | } |
1145 | ||
7dc34054 | 1146 | static bool uvd_v6_0_is_idle(struct amdgpu_ip_block *ip_block) |
aaa36a97 | 1147 | { |
7dc34054 | 1148 | struct amdgpu_device *adev = ip_block->adev; |
5fc3aeeb | 1149 | |
aaa36a97 AD |
1150 | return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); |
1151 | } | |
1152 | ||
82ae6619 | 1153 | static int uvd_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block) |
aaa36a97 AD |
1154 | { |
1155 | unsigned i; | |
82ae6619 | 1156 | struct amdgpu_device *adev = ip_block->adev; |
aaa36a97 AD |
1157 | |
1158 | for (i = 0; i < adev->usec_timeout; i++) { | |
7dc34054 | 1159 | if (uvd_v6_0_is_idle(ip_block)) |
aaa36a97 AD |
1160 | return 0; |
1161 | } | |
1162 | return -ETIMEDOUT; | |
1163 | } | |
1164 | ||
fc0b3b90 | 1165 | #define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd |
6a9456e0 | 1166 | static bool uvd_v6_0_check_soft_reset(struct amdgpu_ip_block *ip_block) |
fc0b3b90 | 1167 | { |
6a9456e0 | 1168 | struct amdgpu_device *adev = ip_block->adev; |
fc0b3b90 CZ |
1169 | u32 srbm_soft_reset = 0; |
1170 | u32 tmp = RREG32(mmSRBM_STATUS); | |
1171 | ||
1172 | if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) || | |
1173 | REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) || | |
1174 | (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK)) | |
1175 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); | |
1176 | ||
1177 | if (srbm_soft_reset) { | |
2bb795f5 | 1178 | adev->uvd.inst->srbm_soft_reset = srbm_soft_reset; |
da146d3b | 1179 | return true; |
fc0b3b90 | 1180 | } else { |
2bb795f5 | 1181 | adev->uvd.inst->srbm_soft_reset = 0; |
da146d3b | 1182 | return false; |
fc0b3b90 | 1183 | } |
fc0b3b90 | 1184 | } |
da146d3b | 1185 | |
9d5ee7ce | 1186 | static int uvd_v6_0_pre_soft_reset(struct amdgpu_ip_block *ip_block) |
aaa36a97 | 1187 | { |
9d5ee7ce | 1188 | struct amdgpu_device *adev = ip_block->adev; |
5fc3aeeb | 1189 | |
2bb795f5 | 1190 | if (!adev->uvd.inst->srbm_soft_reset) |
fc0b3b90 CZ |
1191 | return 0; |
1192 | ||
aaa36a97 | 1193 | uvd_v6_0_stop(adev); |
fc0b3b90 CZ |
1194 | return 0; |
1195 | } | |
1196 | ||
0ef2a1e7 | 1197 | static int uvd_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) |
fc0b3b90 | 1198 | { |
0ef2a1e7 | 1199 | struct amdgpu_device *adev = ip_block->adev; |
fc0b3b90 CZ |
1200 | u32 srbm_soft_reset; |
1201 | ||
2bb795f5 | 1202 | if (!adev->uvd.inst->srbm_soft_reset) |
fc0b3b90 | 1203 | return 0; |
2bb795f5 | 1204 | srbm_soft_reset = adev->uvd.inst->srbm_soft_reset; |
fc0b3b90 CZ |
1205 | |
1206 | if (srbm_soft_reset) { | |
1207 | u32 tmp; | |
1208 | ||
1209 | tmp = RREG32(mmSRBM_SOFT_RESET); | |
1210 | tmp |= srbm_soft_reset; | |
1211 | dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); | |
1212 | WREG32(mmSRBM_SOFT_RESET, tmp); | |
1213 | tmp = RREG32(mmSRBM_SOFT_RESET); | |
1214 | ||
1215 | udelay(50); | |
1216 | ||
1217 | tmp &= ~srbm_soft_reset; | |
1218 | WREG32(mmSRBM_SOFT_RESET, tmp); | |
1219 | tmp = RREG32(mmSRBM_SOFT_RESET); | |
1220 | ||
1221 | /* Wait a little for things to settle down */ | |
1222 | udelay(50); | |
1223 | } | |
1224 | ||
1225 | return 0; | |
1226 | } | |
1227 | ||
e15ec812 | 1228 | static int uvd_v6_0_post_soft_reset(struct amdgpu_ip_block *ip_block) |
fc0b3b90 | 1229 | { |
e15ec812 | 1230 | struct amdgpu_device *adev = ip_block->adev; |
fc0b3b90 | 1231 | |
2bb795f5 | 1232 | if (!adev->uvd.inst->srbm_soft_reset) |
fc0b3b90 | 1233 | return 0; |
aaa36a97 | 1234 | |
aaa36a97 AD |
1235 | mdelay(5); |
1236 | ||
1237 | return uvd_v6_0_start(adev); | |
1238 | } | |
1239 | ||
aaa36a97 AD |
1240 | static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev, |
1241 | struct amdgpu_irq_src *source, | |
1242 | unsigned type, | |
1243 | enum amdgpu_interrupt_state state) | |
1244 | { | |
1245 | // TODO | |
1246 | return 0; | |
1247 | } | |
1248 | ||
1249 | static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev, | |
1250 | struct amdgpu_irq_src *source, | |
1251 | struct amdgpu_iv_entry *entry) | |
1252 | { | |
65da0d40 | 1253 | bool int_handled = true; |
aaa36a97 | 1254 | DRM_DEBUG("IH: UVD TRAP\n"); |
65da0d40 JZ |
1255 | |
1256 | switch (entry->src_id) { | |
1257 | case 124: | |
2bb795f5 | 1258 | amdgpu_fence_process(&adev->uvd.inst->ring); |
65da0d40 JZ |
1259 | break; |
1260 | case 119: | |
1261 | if (likely(uvd_v6_0_enc_support(adev))) | |
2bb795f5 | 1262 | amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]); |
65da0d40 JZ |
1263 | else |
1264 | int_handled = false; | |
1265 | break; | |
1266 | case 120: | |
1267 | if (likely(uvd_v6_0_enc_support(adev))) | |
2bb795f5 | 1268 | amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]); |
65da0d40 JZ |
1269 | else |
1270 | int_handled = false; | |
1271 | break; | |
1272 | } | |
1273 | ||
3d0c75af ZB |
1274 | if (!int_handled) |
1275 | DRM_ERROR("Unhandled interrupt: %d %d\n", | |
65da0d40 JZ |
1276 | entry->src_id, entry->src_data[0]); |
1277 | ||
aaa36a97 AD |
1278 | return 0; |
1279 | } | |
1280 | ||
805b3ba8 RZ |
1281 | static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable) |
1282 | { | |
1283 | uint32_t data1, data3; | |
1284 | ||
1285 | data1 = RREG32(mmUVD_SUVD_CGC_GATE); | |
1286 | data3 = RREG32(mmUVD_CGC_GATE); | |
1287 | ||
1288 | data1 |= UVD_SUVD_CGC_GATE__SRE_MASK | | |
1289 | UVD_SUVD_CGC_GATE__SIT_MASK | | |
1290 | UVD_SUVD_CGC_GATE__SMP_MASK | | |
1291 | UVD_SUVD_CGC_GATE__SCM_MASK | | |
1292 | UVD_SUVD_CGC_GATE__SDB_MASK | | |
1293 | UVD_SUVD_CGC_GATE__SRE_H264_MASK | | |
1294 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK | | |
1295 | UVD_SUVD_CGC_GATE__SIT_H264_MASK | | |
1296 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK | | |
1297 | UVD_SUVD_CGC_GATE__SCM_H264_MASK | | |
1298 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK | | |
1299 | UVD_SUVD_CGC_GATE__SDB_H264_MASK | | |
1300 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK; | |
1301 | ||
1302 | if (enable) { | |
1303 | data3 |= (UVD_CGC_GATE__SYS_MASK | | |
1304 | UVD_CGC_GATE__UDEC_MASK | | |
1305 | UVD_CGC_GATE__MPEG2_MASK | | |
1306 | UVD_CGC_GATE__RBC_MASK | | |
1307 | UVD_CGC_GATE__LMI_MC_MASK | | |
1308 | UVD_CGC_GATE__LMI_UMC_MASK | | |
1309 | UVD_CGC_GATE__IDCT_MASK | | |
1310 | UVD_CGC_GATE__MPRD_MASK | | |
1311 | UVD_CGC_GATE__MPC_MASK | | |
1312 | UVD_CGC_GATE__LBSI_MASK | | |
1313 | UVD_CGC_GATE__LRBBM_MASK | | |
1314 | UVD_CGC_GATE__UDEC_RE_MASK | | |
1315 | UVD_CGC_GATE__UDEC_CM_MASK | | |
1316 | UVD_CGC_GATE__UDEC_IT_MASK | | |
1317 | UVD_CGC_GATE__UDEC_DB_MASK | | |
1318 | UVD_CGC_GATE__UDEC_MP_MASK | | |
1319 | UVD_CGC_GATE__WCB_MASK | | |
805b3ba8 RZ |
1320 | UVD_CGC_GATE__JPEG_MASK | |
1321 | UVD_CGC_GATE__SCPU_MASK | | |
1322 | UVD_CGC_GATE__JPEG2_MASK); | |
3c3a7e61 RZ |
1323 | /* only in pg enabled, we can gate clock to vcpu*/ |
1324 | if (adev->pg_flags & AMD_PG_SUPPORT_UVD) | |
1325 | data3 |= UVD_CGC_GATE__VCPU_MASK; | |
1326 | ||
805b3ba8 RZ |
1327 | data3 &= ~UVD_CGC_GATE__REGS_MASK; |
1328 | } else { | |
1329 | data3 = 0; | |
1330 | } | |
1331 | ||
1332 | WREG32(mmUVD_SUVD_CGC_GATE, data1); | |
1333 | WREG32(mmUVD_CGC_GATE, data3); | |
1334 | } | |
1335 | ||
be3ecca7 TSD |
1336 | static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev) |
1337 | { | |
805b3ba8 | 1338 | uint32_t data, data2; |
be3ecca7 TSD |
1339 | |
1340 | data = RREG32(mmUVD_CGC_CTRL); | |
be3ecca7 TSD |
1341 | data2 = RREG32(mmUVD_SUVD_CGC_CTRL); |
1342 | ||
805b3ba8 | 1343 | |
be3ecca7 TSD |
1344 | data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | |
1345 | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK); | |
1346 | ||
be3ecca7 TSD |
1347 | |
1348 | data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK | | |
1349 | (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) | | |
1350 | (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY)); | |
1351 | ||
1352 | data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | | |
1353 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK | | |
1354 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK | | |
1355 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK | | |
1356 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK | | |
1357 | UVD_CGC_CTRL__SYS_MODE_MASK | | |
1358 | UVD_CGC_CTRL__UDEC_MODE_MASK | | |
1359 | UVD_CGC_CTRL__MPEG2_MODE_MASK | | |
1360 | UVD_CGC_CTRL__REGS_MODE_MASK | | |
1361 | UVD_CGC_CTRL__RBC_MODE_MASK | | |
1362 | UVD_CGC_CTRL__LMI_MC_MODE_MASK | | |
1363 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK | | |
1364 | UVD_CGC_CTRL__IDCT_MODE_MASK | | |
1365 | UVD_CGC_CTRL__MPRD_MODE_MASK | | |
1366 | UVD_CGC_CTRL__MPC_MODE_MASK | | |
1367 | UVD_CGC_CTRL__LBSI_MODE_MASK | | |
1368 | UVD_CGC_CTRL__LRBBM_MODE_MASK | | |
1369 | UVD_CGC_CTRL__WCB_MODE_MASK | | |
1370 | UVD_CGC_CTRL__VCPU_MODE_MASK | | |
1371 | UVD_CGC_CTRL__JPEG_MODE_MASK | | |
1372 | UVD_CGC_CTRL__SCPU_MODE_MASK | | |
1373 | UVD_CGC_CTRL__JPEG2_MODE_MASK); | |
1374 | data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | | |
1375 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | | |
1376 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK | | |
1377 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK | | |
1378 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK); | |
be3ecca7 TSD |
1379 | |
1380 | WREG32(mmUVD_CGC_CTRL, data); | |
be3ecca7 TSD |
1381 | WREG32(mmUVD_SUVD_CGC_CTRL, data2); |
1382 | } | |
1383 | ||
1384 | #if 0 | |
1385 | static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev) | |
1386 | { | |
1387 | uint32_t data, data1, cgc_flags, suvd_flags; | |
1388 | ||
1389 | data = RREG32(mmUVD_CGC_GATE); | |
1390 | data1 = RREG32(mmUVD_SUVD_CGC_GATE); | |
1391 | ||
1392 | cgc_flags = UVD_CGC_GATE__SYS_MASK | | |
1393 | UVD_CGC_GATE__UDEC_MASK | | |
1394 | UVD_CGC_GATE__MPEG2_MASK | | |
1395 | UVD_CGC_GATE__RBC_MASK | | |
1396 | UVD_CGC_GATE__LMI_MC_MASK | | |
1397 | UVD_CGC_GATE__IDCT_MASK | | |
1398 | UVD_CGC_GATE__MPRD_MASK | | |
1399 | UVD_CGC_GATE__MPC_MASK | | |
1400 | UVD_CGC_GATE__LBSI_MASK | | |
1401 | UVD_CGC_GATE__LRBBM_MASK | | |
1402 | UVD_CGC_GATE__UDEC_RE_MASK | | |
1403 | UVD_CGC_GATE__UDEC_CM_MASK | | |
1404 | UVD_CGC_GATE__UDEC_IT_MASK | | |
1405 | UVD_CGC_GATE__UDEC_DB_MASK | | |
1406 | UVD_CGC_GATE__UDEC_MP_MASK | | |
1407 | UVD_CGC_GATE__WCB_MASK | | |
1408 | UVD_CGC_GATE__VCPU_MASK | | |
1409 | UVD_CGC_GATE__SCPU_MASK | | |
1410 | UVD_CGC_GATE__JPEG_MASK | | |
1411 | UVD_CGC_GATE__JPEG2_MASK; | |
1412 | ||
1413 | suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK | | |
1414 | UVD_SUVD_CGC_GATE__SIT_MASK | | |
1415 | UVD_SUVD_CGC_GATE__SMP_MASK | | |
1416 | UVD_SUVD_CGC_GATE__SCM_MASK | | |
1417 | UVD_SUVD_CGC_GATE__SDB_MASK; | |
1418 | ||
1419 | data |= cgc_flags; | |
1420 | data1 |= suvd_flags; | |
1421 | ||
1422 | WREG32(mmUVD_CGC_GATE, data); | |
1423 | WREG32(mmUVD_SUVD_CGC_GATE, data1); | |
1424 | } | |
1425 | #endif | |
1426 | ||
805b3ba8 RZ |
1427 | static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev, |
1428 | bool enable) | |
1429 | { | |
1430 | u32 orig, data; | |
1431 | ||
1432 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) { | |
1433 | data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL); | |
1434 | data |= 0xfff; | |
1435 | WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data); | |
1436 | ||
1437 | orig = data = RREG32(mmUVD_CGC_CTRL); | |
1438 | data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; | |
1439 | if (orig != data) | |
1440 | WREG32(mmUVD_CGC_CTRL, data); | |
1441 | } else { | |
1442 | data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL); | |
1443 | data &= ~0xfff; | |
1444 | WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data); | |
1445 | ||
1446 | orig = data = RREG32(mmUVD_CGC_CTRL); | |
1447 | data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; | |
1448 | if (orig != data) | |
1449 | WREG32(mmUVD_CGC_CTRL, data); | |
1450 | } | |
1451 | } | |
1452 | ||
f2ba8c3d | 1453 | static int uvd_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, |
5fc3aeeb | 1454 | enum amd_clockgating_state state) |
aaa36a97 | 1455 | { |
f2ba8c3d | 1456 | struct amdgpu_device *adev = ip_block->adev; |
a9d4fe2f | 1457 | bool enable = (state == AMD_CG_STATE_GATE); |
a0cdef9e | 1458 | |
4be5097c | 1459 | if (enable) { |
be3ecca7 | 1460 | /* wait for STATUS to clear */ |
82ae6619 | 1461 | if (uvd_v6_0_wait_for_idle(ip_block)) |
be3ecca7 | 1462 | return -EBUSY; |
805b3ba8 | 1463 | uvd_v6_0_enable_clock_gating(adev, true); |
be3ecca7 TSD |
1464 | /* enable HW gates because UVD is idle */ |
1465 | /* uvd_v6_0_set_hw_clock_gating(adev); */ | |
805b3ba8 RZ |
1466 | } else { |
1467 | /* disable HW gating and enable Sw gating */ | |
1468 | uvd_v6_0_enable_clock_gating(adev, false); | |
9b08a306 | 1469 | } |
805b3ba8 | 1470 | uvd_v6_0_set_sw_clock_gating(adev); |
aaa36a97 AD |
1471 | return 0; |
1472 | } | |
1473 | ||
static int uvd_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = ip_block->adev;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE)
		uvd_v6_0_stop(adev);
	else
		ret = uvd_v6_0_start(adev);

	return ret;
}

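/**
 * uvd_v6_0_get_clockgating_state - report the active clockgating flags
 *
 * @ip_block: amdgpu_ip_block pointer
 * @flags: bitmask updated with the AMD_CG_SUPPORT_* flags currently active
 *
 * The CGC registers are unreadable while UVD is powergated, so the SMC
 * powergating status is checked first and the query is skipped if the
 * block is gated.
 */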
static void uvd_v6_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
	struct amdgpu_device *adev = ip_block->adev;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

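/* IP-level callbacks (init/teardown, suspend/resume, reset and gating)
 * that the amdgpu core invokes on this UVD block.
 */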
static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.prepare_suspend = uvd_v6_0_prepare_suspend,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

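/* Decode ring callbacks for physical mode: command streams are validated
 * and patched to physical addresses by the CS parser, so no VM flush is
 * emitted.
 */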
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

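/* Decode ring callbacks for VM mode: IBs run in the GPU virtual address
 * space, so a VM flush is emitted instead of patching the command stream.
 */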
static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

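/* Encode ring callbacks (VM mode only); note the wider 0x3f align_mask
 * and the HEVC_ENC_CMD_NO_OP padding command specific to the encoder.
 */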
static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_enc_ring_get_rptr,
	.get_wptr = uvd_v6_0_enc_ring_get_wptr,
	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
	.emit_frame_size =
		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v6_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
	.emit_ib = uvd_v6_0_enc_ring_emit_ib,
	.emit_fence = uvd_v6_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
	.test_ring = uvd_v6_0_enc_ring_test_ring,
	.test_ib = uvd_v6_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v6_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

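/**
 * uvd_v6_0_set_ring_funcs - set the decode ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Polaris10 and newer use the VM variant; older parts fall back to
 * physical mode with CS patching.
 */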
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

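/**
 * uvd_v6_0_set_enc_ring_funcs - set the encode ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * All encode rings share the same VM-mode function table.
 */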
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
		adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

	DRM_INFO("UVD ENC is enabled in VM mode\n");
}

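/* Interrupt source callbacks covering both the decode and encode rings. */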
static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

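/**
 * uvd_v6_0_set_irq_funcs - set the interrupt functions
 *
 * @adev: amdgpu_device pointer
 *
 * One interrupt type per ring: the decode ring alone, plus one per
 * encode ring when encoding is supported.
 */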
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (uvd_v6_0_enc_support(adev))
		adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
	else
		adev->uvd.inst->irq.num_types = 1;

	adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 3,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};