/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable);

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

static int uvd_v6_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v6_0_set_ring_funcs(adev);
        uvd_v6_0_set_irq_funcs(adev);

        return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* UVD TRAP */
        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
        if (r)
                return r;

        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;
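
        /* Create the single UVD decode ring; 512 is the ring size in dwords
         * handed to amdgpu_ring_init() (parameter meaning assumed from the
         * amdgpu_ring API).
         */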
        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

        return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t tmp;
        int r;

        amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
        uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
        uvd_v6_0_enable_mgcg(adev, true);

        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                goto done;
        }

        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
        }
        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);
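
        /* Enable semaphore command handling; the value 3 sets the low two
         * control bits of UVD_SEMA_CNTL (exact bit names assumed, not
         * quoted from the register headers).
         */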
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_commit(ring);

done:
        if (!r)
                DRM_INFO("UVD initialized successfully.\n");

        return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark the ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;

        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v6_0_stop(adev);

        ring->ready = false;

        return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = uvd_v6_0_hw_fini(adev);
        if (r)
                return r;

        return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        return uvd_v6_0_hw_init(adev);
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t offset;
        uint32_t size;

        /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
               lower_32_bits(adev->uvd.gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
               upper_32_bits(adev->uvd.gpu_addr));

        offset = AMDGPU_UVD_FIRMWARE_OFFSET;
        size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = AMDGPU_UVD_STACK_SIZE +
               (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
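
        /* Mirror the GFX tiling/addressing configuration into the UVD
         * decoder blocks, and publish the session handle limit (GP_SCRATCH4
         * is assumed to be how the firmware learns max_handles).
         */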
        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

        WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
                                             bool enable)
{
        u32 data, data1;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        if (enable) {
                data |= UVD_CGC_GATE__SYS_MASK |
                        UVD_CGC_GATE__UDEC_MASK |
                        UVD_CGC_GATE__MPEG2_MASK |
                        UVD_CGC_GATE__RBC_MASK |
                        UVD_CGC_GATE__LMI_MC_MASK |
                        UVD_CGC_GATE__IDCT_MASK |
                        UVD_CGC_GATE__MPRD_MASK |
                        UVD_CGC_GATE__MPC_MASK |
                        UVD_CGC_GATE__LBSI_MASK |
                        UVD_CGC_GATE__LRBBM_MASK |
                        UVD_CGC_GATE__UDEC_RE_MASK |
                        UVD_CGC_GATE__UDEC_CM_MASK |
                        UVD_CGC_GATE__UDEC_IT_MASK |
                        UVD_CGC_GATE__UDEC_DB_MASK |
                        UVD_CGC_GATE__UDEC_MP_MASK |
                        UVD_CGC_GATE__WCB_MASK |
                        UVD_CGC_GATE__VCPU_MASK |
                        UVD_CGC_GATE__SCPU_MASK;
                data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
                        UVD_SUVD_CGC_GATE__SIT_MASK |
                        UVD_SUVD_CGC_GATE__SMP_MASK |
                        UVD_SUVD_CGC_GATE__SCM_MASK |
                        UVD_SUVD_CGC_GATE__SDB_MASK |
                        UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                        UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                        UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                        UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                        UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                        UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                        UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                        UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
        } else {
                data &= ~(UVD_CGC_GATE__SYS_MASK |
                        UVD_CGC_GATE__UDEC_MASK |
                        UVD_CGC_GATE__MPEG2_MASK |
                        UVD_CGC_GATE__RBC_MASK |
                        UVD_CGC_GATE__LMI_MC_MASK |
                        UVD_CGC_GATE__LMI_UMC_MASK |
                        UVD_CGC_GATE__IDCT_MASK |
                        UVD_CGC_GATE__MPRD_MASK |
                        UVD_CGC_GATE__MPC_MASK |
                        UVD_CGC_GATE__LBSI_MASK |
                        UVD_CGC_GATE__LRBBM_MASK |
                        UVD_CGC_GATE__UDEC_RE_MASK |
                        UVD_CGC_GATE__UDEC_CM_MASK |
                        UVD_CGC_GATE__UDEC_IT_MASK |
                        UVD_CGC_GATE__UDEC_DB_MASK |
                        UVD_CGC_GATE__UDEC_MP_MASK |
                        UVD_CGC_GATE__WCB_MASK |
                        UVD_CGC_GATE__VCPU_MASK |
                        UVD_CGC_GATE__SCPU_MASK);
                data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
                        UVD_SUVD_CGC_GATE__SIT_MASK |
                        UVD_SUVD_CGC_GATE__SMP_MASK |
                        UVD_SUVD_CGC_GATE__SCM_MASK |
                        UVD_SUVD_CGC_GATE__SDB_MASK |
                        UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                        UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                        UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                        UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                        UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                        UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                        UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                        UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
        }
        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        uint32_t mp_swap_cntl;
        int i, j, r;

        /* disable DPG */
        WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

        /* disable byte swapping */
        lmi_swap_cntl = 0;
        mp_swap_cntl = 0;

        uvd_v6_0_mc_resume(adev);

        /* disable interrupt */
        WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

        /* stall UMC and register bus before resetting VCPU */
        WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
        mdelay(1);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET,
               UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
               UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
               UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
               UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
               UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
               UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
               UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
               UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);

        /* take UVD block out of reset */
        WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
        mdelay(5);

        /* initialize UVD memory controller */
        WREG32(mmUVD_LMI_CTRL,
               (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
               UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
               UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
               UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
               UVD_LMI_CTRL__REQ_MODE_MASK |
               UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
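
        /* Program the MPC mux/ALU with UVD's standard defaults; the same
         * magic values appear in the init paths of other UVD generations.
         */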
        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        /* take all subblocks out of reset, except VCPU */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

        /* enable UMC */
        WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
        mdelay(10);
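
        /* Wait for the VCPU to report ready (UVD_STATUS bit 1), retrying
         * the boot with a VCPU soft reset up to ten times before giving up.
         */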
        for (i = 0; i < 10; ++i) {
                uint32_t status;

                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
                mdelay(10);
                WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }

        /* enable master interrupt */
        WREG32_P(mmUVD_MASTINT_EN,
                 (UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK),
                 ~(UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK));

        /* clear the bit 4 of UVD_STATUS */
        WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

        /* force RBC into idle state */
        rb_bufsz = order_base_2(ring->ring_size);
        tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        WREG32(mmUVD_RBC_RB_CNTL, tmp);
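
        /* RB_NO_FETCH (set above) keeps the RBC from fetching commands
         * while the ring base and pointers are reprogrammed; it is cleared
         * again at the end of this function.
         */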

        /* set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* set the wb address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

        /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
               lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
               upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

        WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

        return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put VCPU into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* disable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 0x0);

        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: fence address
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);
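
        /* The zeroed DATA0/DATA1 pair plus command value 2 below emits the
         * trap announced in the kernel-doc above; command value 0 above
         * performed the fence write itself.
         */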
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp flush.
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
        amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp invalidate.
 */
static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
        amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
                                  unsigned vm_id, bool ctx_switch)
{
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
        amdgpu_ring_write(ring, vm_id);

        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

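/* UVD has no dedicated VM-flush packet, so the flush below is built from
 * GPCOM register commands: 0x8 appears to be a VCPU register write and 0xC
 * a masked register wait (mask supplied via GP_SCRATCH8); the three packet
 * groups write the page table base, request an invalidate, then poll until
 * the invalidate bit clears.
 */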
static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                        unsigned vm_id, uint64_t pd_addr)
{
        uint32_t reg;

        if (vm_id < 8)
                reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
        else
                reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, reg << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, pd_addr >> 12);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 1 << vm_id);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
        amdgpu_ring_write(ring, 1 << vm_id); /* mask */
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0xC);
}

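/* Command 0xE is assumed to be a fence-memory poll: wait until the value at
 * the address given in DATA0/DATA1, masked by GP_SCRATCH8, reaches the
 * sequence number placed in GP_SCRATCH9.
 */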
static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
        amdgpu_ring_write(ring, 0xffffffff); /* mask */
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0xE);
}

static bool uvd_v6_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (uvd_v6_0_is_idle(handle))
                        return 0;
        }
        return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;
        u32 tmp = RREG32(mmSRBM_STATUS);

        if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
            REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
            (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
                                                SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

        if (srbm_soft_reset) {
                adev->uvd.srbm_soft_reset = srbm_soft_reset;
                return true;
        } else {
                adev->uvd.srbm_soft_reset = 0;
                return false;
        }
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!adev->uvd.srbm_soft_reset)
                return 0;

        uvd_v6_0_stop(adev);
        return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset;

        if (!adev->uvd.srbm_soft_reset)
                return 0;
        srbm_soft_reset = adev->uvd.srbm_soft_reset;

        if (srbm_soft_reset) {
                u32 tmp;

                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                /* Wait a little for things to settle down */
                udelay(50);
        }

        return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!adev->uvd.srbm_soft_reset)
                return 0;

        mdelay(5);

        return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: UVD TRAP\n");

        amdgpu_fence_process(&adev->uvd.ring);

        return 0;
}

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
        uint32_t data1, data3;

        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        data3 = RREG32(mmUVD_CGC_GATE);

        data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
                 UVD_SUVD_CGC_GATE__SIT_MASK |
                 UVD_SUVD_CGC_GATE__SMP_MASK |
                 UVD_SUVD_CGC_GATE__SCM_MASK |
                 UVD_SUVD_CGC_GATE__SDB_MASK |
                 UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                 UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                 UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                 UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                 UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                 UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                 UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                 UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

        if (enable) {
                data3 |= (UVD_CGC_GATE__SYS_MASK |
                          UVD_CGC_GATE__UDEC_MASK |
                          UVD_CGC_GATE__MPEG2_MASK |
                          UVD_CGC_GATE__RBC_MASK |
                          UVD_CGC_GATE__LMI_MC_MASK |
                          UVD_CGC_GATE__LMI_UMC_MASK |
                          UVD_CGC_GATE__IDCT_MASK |
                          UVD_CGC_GATE__MPRD_MASK |
                          UVD_CGC_GATE__MPC_MASK |
                          UVD_CGC_GATE__LBSI_MASK |
                          UVD_CGC_GATE__LRBBM_MASK |
                          UVD_CGC_GATE__UDEC_RE_MASK |
                          UVD_CGC_GATE__UDEC_CM_MASK |
                          UVD_CGC_GATE__UDEC_IT_MASK |
                          UVD_CGC_GATE__UDEC_DB_MASK |
                          UVD_CGC_GATE__UDEC_MP_MASK |
                          UVD_CGC_GATE__WCB_MASK |
                          UVD_CGC_GATE__JPEG_MASK |
                          UVD_CGC_GATE__SCPU_MASK |
                          UVD_CGC_GATE__JPEG2_MASK);
                /* only when PG is enabled can we gate the clock to the VCPU */
                if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
                        data3 |= UVD_CGC_GATE__VCPU_MASK;

                data3 &= ~UVD_CGC_GATE__REGS_MASK;
        } else {
                data3 = 0;
        }

        WREG32(mmUVD_SUVD_CGC_GATE, data1);
        WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data2;

        data = RREG32(mmUVD_CGC_CTRL);
        data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

        data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
                  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

        data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
                (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
                (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

        data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
                  UVD_CGC_CTRL__SYS_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_MODE_MASK |
                  UVD_CGC_CTRL__MPEG2_MODE_MASK |
                  UVD_CGC_CTRL__REGS_MODE_MASK |
                  UVD_CGC_CTRL__RBC_MODE_MASK |
                  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
                  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
                  UVD_CGC_CTRL__IDCT_MODE_MASK |
                  UVD_CGC_CTRL__MPRD_MODE_MASK |
                  UVD_CGC_CTRL__MPC_MODE_MASK |
                  UVD_CGC_CTRL__LBSI_MODE_MASK |
                  UVD_CGC_CTRL__LRBBM_MODE_MASK |
                  UVD_CGC_CTRL__WCB_MODE_MASK |
                  UVD_CGC_CTRL__VCPU_MODE_MASK |
                  UVD_CGC_CTRL__JPEG_MODE_MASK |
                  UVD_CGC_CTRL__SCPU_MODE_MASK |
                  UVD_CGC_CTRL__JPEG2_MODE_MASK);
        data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
                   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
                   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
                   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
                   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

        WREG32(mmUVD_CGC_CTRL, data);
        WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data1, cgc_flags, suvd_flags;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);

        cgc_flags = UVD_CGC_GATE__SYS_MASK |
                    UVD_CGC_GATE__UDEC_MASK |
                    UVD_CGC_GATE__MPEG2_MASK |
                    UVD_CGC_GATE__RBC_MASK |
                    UVD_CGC_GATE__LMI_MC_MASK |
                    UVD_CGC_GATE__IDCT_MASK |
                    UVD_CGC_GATE__MPRD_MASK |
                    UVD_CGC_GATE__MPC_MASK |
                    UVD_CGC_GATE__LBSI_MASK |
                    UVD_CGC_GATE__LRBBM_MASK |
                    UVD_CGC_GATE__UDEC_RE_MASK |
                    UVD_CGC_GATE__UDEC_CM_MASK |
                    UVD_CGC_GATE__UDEC_IT_MASK |
                    UVD_CGC_GATE__UDEC_DB_MASK |
                    UVD_CGC_GATE__UDEC_MP_MASK |
                    UVD_CGC_GATE__WCB_MASK |
                    UVD_CGC_GATE__VCPU_MASK |
                    UVD_CGC_GATE__SCPU_MASK |
                    UVD_CGC_GATE__JPEG_MASK |
                    UVD_CGC_GATE__JPEG2_MASK;

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                     UVD_SUVD_CGC_GATE__SIT_MASK |
                     UVD_SUVD_CGC_GATE__SMP_MASK |
                     UVD_SUVD_CGC_GATE__SCM_MASK |
                     UVD_SUVD_CGC_GATE__SDB_MASK;

        data |= cgc_flags;
        data1 |= suvd_flags;

        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable)
{
        u32 orig, data;

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data |= 0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        } else {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data &= ~0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        }
}

static int uvd_v6_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_CG_STATE_GATE);

        if (enable) {
                /* wait for STATUS to clear */
                if (uvd_v6_0_wait_for_idle(handle))
                        return -EBUSY;
                uvd_v6_0_enable_clock_gating(adev, true);
                /* enable HW gates because UVD is idle */
                /* uvd_v6_0_set_hw_clock_gating(adev); */
        } else {
                /* disable HW gating and enable Sw gating */
                uvd_v6_0_enable_clock_gating(adev, false);
                uvd_v6_0_set_sw_clock_gating(adev);
        }

        return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the UVD block.
         * That's done in the dpm code via the SMC. This
         * just re-inits the block as necessary. The actual
         * gating still happens in the dpm code. We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks.
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int ret = 0;

        WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
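
        /* Flag power gating as enabled in UVD_POWER_STATUS; as the comment
         * above notes, the actual gating is carried out by the SMC via the
         * dpm code.
         */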
        if (state == AMD_PG_STATE_GATE) {
                uvd_v6_0_stop(adev);
        } else {
                ret = uvd_v6_0_start(adev);
        }

        return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int data;

        mutex_lock(&adev->pm.mutex);

        if (adev->flags & AMD_IS_APU)
                data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
        else
                data = RREG32_SMC(ixCURRENT_PG_STATUS);

        if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
                DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
                goto out;
        }

        /* AMD_CG_SUPPORT_UVD_MGCG */
        data = RREG32(mmUVD_CGC_CTRL);
        if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
                *flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
        mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
        .name = "uvd_v6_0",
        .early_init = uvd_v6_0_early_init,
        .late_init = NULL,
        .sw_init = uvd_v6_0_sw_init,
        .sw_fini = uvd_v6_0_sw_fini,
        .hw_init = uvd_v6_0_hw_init,
        .hw_fini = uvd_v6_0_hw_fini,
        .suspend = uvd_v6_0_suspend,
        .resume = uvd_v6_0_resume,
        .is_idle = uvd_v6_0_is_idle,
        .wait_for_idle = uvd_v6_0_wait_for_idle,
        .check_soft_reset = uvd_v6_0_check_soft_reset,
        .pre_soft_reset = uvd_v6_0_pre_soft_reset,
        .soft_reset = uvd_v6_0_soft_reset,
        .post_soft_reset = uvd_v6_0_post_soft_reset,
        .set_clockgating_state = uvd_v6_0_set_clockgating_state,
        .set_powergating_state = uvd_v6_0_set_powergating_state,
        .get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .nop = PACKET0(mmUVD_NO_OP, 0),
        .support_64bit_ptrs = false,
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_frame_size =
                2 + /* uvd_v6_0_ring_emit_hdp_flush */
                2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
                10 + /* uvd_v6_0_ring_emit_pipeline_sync */
                14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
        .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
        .emit_ib = uvd_v6_0_ring_emit_ib,
        .emit_fence = uvd_v6_0_ring_emit_fence,
        .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
        .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .nop = PACKET0(mmUVD_NO_OP, 0),
        .support_64bit_ptrs = false,
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
        .emit_frame_size =
                2 + /* uvd_v6_0_ring_emit_hdp_flush */
                2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
                10 + /* uvd_v6_0_ring_emit_pipeline_sync */
                20 + /* uvd_v6_0_ring_emit_vm_flush */
                14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
        .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
        .emit_ib = uvd_v6_0_ring_emit_ib,
        .emit_fence = uvd_v6_0_ring_emit_fence,
        .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
        .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
        .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
        .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
        if (adev->asic_type >= CHIP_POLARIS10) {
                adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
                DRM_INFO("UVD is enabled in VM mode\n");
        } else {
                adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
                DRM_INFO("UVD is enabled in physical mode\n");
        }
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
        .set = uvd_v6_0_set_interrupt_state,
        .process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->uvd.irq.num_types = 1;
        adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_UVD,
        .major = 6,
        .minor = 0,
        .rev = 0,
        .funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_UVD,
        .major = 6,
        .minor = 2,
        .rev = 0,
        .funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_UVD,
        .major = 6,
        .minor = 3,
        .rev = 0,
        .funcs = &uvd_v6_0_ip_funcs,
};