/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
#include "ivsrcid/ivsrcid_vislands30.h"

/* Polaris10/11/12 firmware version */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
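/*
 * Assumed encoding (not defined in this file): adev->uvd.fw_version is
 * packed the same way, (major << 24) | (minor << 16) | (revision << 8),
 * so a single unsigned compare against FW_1_130_16 (== 0x01821000,
 * i.e. firmware 1.130.16) orders versions correctly.
 */
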
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v6_0_enc_support - get encode support status
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the current hardware encode support status
 */
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
	return ((adev->asic_type >= CHIP_POLARIS10) &&
		(adev->asic_type <= CHIP_VEGAM) &&
		(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}

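/*
 * Note: adev->uvd.fw_version is still zero when this is called from
 * early_init (the firmware header is only parsed later, via
 * amdgpu_uvd_sw_init()), which is why a zero version is accepted above;
 * sw_init re-checks and tears the encode rings back down if the loaded
 * firmware turns out to be older than FW_1_130_16.
 */
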
/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_RPTR);
	else
		return RREG32(mmUVD_RB_RPTR2);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_WPTR);
	else
		return RREG32(mmUVD_RB_WPTR2);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		WREG32(mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32(mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

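/*
 * Packet layout note (inferred from how these test IBs are built, not
 * from a header in this file): each command starts with its size in
 * bytes followed by a command id, e.g. 0x00000018/0x00000001 is a
 * 0x18-byte (six dword) "session info" packet, and 0x08000001 /
 * 0x08000002 are the initialize and close-session ops used by the
 * create and destroy messages respectively.
 */
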
/**
 * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
					uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	if (!(adev->flags & AMD_IS_APU) &&
	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
		return -ENOENT;

	uvd_v6_0_set_ring_funcs(adev);

	if (uvd_v6_0_enc_support(adev)) {
		adev->uvd.num_enc_rings = 2;
		uvd_v6_0_set_enc_ring_funcs(adev);
	}

	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
	if (r)
		return r;

	/* UVD ENC TRAP */
	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (!uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			adev->uvd.inst->ring_enc[i].funcs = NULL;

		adev->uvd.inst->irq.num_types = 1;
		adev->uvd.num_enc_rings = 0;

		DRM_INFO("UVD ENC is disabled\n");
	}

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			sprintf(ring->name, "uvd_enc%d", i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst->irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
	}

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int i, r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r) {
		if (uvd_v6_0_enc_support(adev))
			DRM_INFO("UVD and UVD ENC initialized successfully.\n");
		else
			DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v6_0_hw_init(adev);
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.inst->gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

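/*
 * Resulting layout inside the UVD BO (offsets in bytes; the OFFSETn
 * registers take them shifted right by 3):
 *
 *   CACHE0: firmware image, at AMDGPU_UVD_FIRMWARE_OFFSET
 *   CACHE1: heap, immediately after the firmware
 *   CACHE2: stack plus per-session state (SESSION_SIZE * max_handles)
 */
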
#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
					     bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
			UVD_SUVD_CGC_GATE__SIT_MASK |
			UVD_SUVD_CGC_GATE__SMP_MASK |
			UVD_SUVD_CGC_GATE__SCM_MASK |
			UVD_SUVD_CGC_GATE__SDB_MASK |
			UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
			UVD_SUVD_CGC_GATE__SIT_MASK |
			UVD_SUVD_CGC_GATE__SMP_MASK |
			UVD_SUVD_CGC_GATE__SCM_MASK |
			UVD_SUVD_CGC_GATE__SDB_MASK |
			UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	if (uvd_v6_0_enc_support(adev)) {
		ring = &adev->uvd.inst->ring_enc[0];
		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst->ring_enc[1];
		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

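/*
 * The raw "1 << 8" masks above appear to be the same UVD_LMI_CTRL2
 * STALL_ARB_UMC field that uvd_v6_0_start() programs via WREG32_FIELD()
 * (an inference from the matching "stall UMC" comments, not from the
 * register headers).
 */
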
/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

989
990/**
991 * uvd_v6_0_ring_emit_ib - execute indirect buffer
992 *
993 * @ring: amdgpu_ring pointer
166c2089 994 * @job: job to retrieve vmid from
aaa36a97 995 * @ib: indirect buffer to execute
166c2089 996 * @flags: unused
aaa36a97
AD
997 *
998 * Write ring commands to execute the indirect buffer
999 */
1000static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
34955e03 1001 struct amdgpu_job *job,
d88bf583 1002 struct amdgpu_ib *ib,
c4c905ec 1003 uint32_t flags)
aaa36a97 1004{
34955e03
RZ
1005 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1006
0f30a397 1007 amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
c4f46f22 1008 amdgpu_ring_write(ring, vmid);
0f30a397 1009
aaa36a97
AD
1010 amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
1011 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1012 amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
1013 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1014 amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
1015 amdgpu_ring_write(ring, ib->length_dw);
1016}
1017
/**
 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

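/*
 * GPCOM command usage, inferred from the sequences above rather than
 * from documentation: cmd 0x8 writes DATA1 to the register named in
 * DATA0 (uvd_v6_0_ring_emit_wreg()), while 0xC and 0xE appear to be
 * poll commands that wait on the location given in DATA0/DATA1 using
 * the mask and value staged through the GP_SCRATCH registers.
 */
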
static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
}

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst->srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool int_handled = true;

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.inst->ring);
		break;
	case 119:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
		else
			int_handled = false;
		break;
	case 120:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
		else
			int_handled = false;
		break;
	}

	if (!int_handled)
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);

	return 0;
}

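/*
 * The bare src_id values above mirror the ids registered in sw_init:
 * 124 is VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE and 119/120 correspond
 * to VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP for the two encode rings
 * (an assumption about the numeric constants, which live in
 * ivsrcid_vislands30.h rather than in this file).
 */
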
static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		 UVD_SUVD_CGC_GATE__SIT_MASK |
		 UVD_SUVD_CGC_GATE__SMP_MASK |
		 UVD_SUVD_CGC_GATE__SCM_MASK |
		 UVD_SUVD_CGC_GATE__SDB_MASK |
		 UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		 UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		 UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		 UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		 UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			  UVD_CGC_GATE__UDEC_MASK |
			  UVD_CGC_GATE__MPEG2_MASK |
			  UVD_CGC_GATE__RBC_MASK |
			  UVD_CGC_GATE__LMI_MC_MASK |
			  UVD_CGC_GATE__LMI_UMC_MASK |
			  UVD_CGC_GATE__IDCT_MASK |
			  UVD_CGC_GATE__MPRD_MASK |
			  UVD_CGC_GATE__MPC_MASK |
			  UVD_CGC_GATE__LBSI_MASK |
			  UVD_CGC_GATE__LRBBM_MASK |
			  UVD_CGC_GATE__UDEC_RE_MASK |
			  UVD_CGC_GATE__UDEC_CM_MASK |
			  UVD_CGC_GATE__UDEC_IT_MASK |
			  UVD_CGC_GATE__UDEC_DB_MASK |
			  UVD_CGC_GATE__UDEC_MP_MASK |
			  UVD_CGC_GATE__WCB_MASK |
			  UVD_CGC_GATE__JPEG_MASK |
			  UVD_CGC_GATE__SCPU_MASK |
			  UVD_CGC_GATE__JPEG2_MASK);
		/* only when PG is enabled can we gate the clock to the VCPU */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
		/* enable HW gates because UVD is idle */
		/* uvd_v6_0_set_hw_clock_gating(adev); */
	} else {
		/* disable HW gating and enable SW gating */
		uvd_v6_0_enable_clock_gating(adev, false);
	}
	uvd_v6_0_set_sw_clock_gating(adev);

	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
	} else {
		ret = uvd_v6_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

1478
1479static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
1480{
1481 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1482 int data;
1483
1484 mutex_lock(&adev->pm.mutex);
1485
1c622002
RZ
1486 if (adev->flags & AMD_IS_APU)
1487 data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
1488 else
1489 data = RREG32_SMC(ixCURRENT_PG_STATUS);
1490
1491 if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
c8781f56
HR
1492 DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
1493 goto out;
aaa36a97 1494 }
c8781f56
HR
1495
1496 /* AMD_CG_SUPPORT_UVD_MGCG */
1497 data = RREG32(mmUVD_CGC_CTRL);
1498 if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
1499 *flags |= AMD_CG_SUPPORT_UVD_MGCG;
1500
1501out:
1502 mutex_unlock(&adev->pm.mutex);
aaa36a97
AD
1503}
1504
static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_enc_ring_get_rptr,
	.get_wptr = uvd_v6_0_enc_ring_get_wptr,
	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
	.emit_frame_size =
		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v6_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
	.emit_ib = uvd_v6_0_enc_ring_emit_ib,
	.emit_fence = uvd_v6_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
	.test_ring = uvd_v6_0_enc_ring_test_ring,
	.test_ib = uvd_v6_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v6_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
		adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

	DRM_INFO("UVD ENC is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (uvd_v6_0_enc_support(adev))
		adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
	else
		adev->uvd.inst->irq.num_types = 1;

	adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 3,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};