drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include <drm/drmP.h>
26#include "amdgpu.h"
27#include "amdgpu_uvd.h"
28#include "soc15d.h"
29#include "soc15_common.h"
247ac951 30#include "mmsch_v1_0.h"
09bfb891 31
32#include "uvd/uvd_7_0_offset.h"
33#include "uvd/uvd_7_0_sh_mask.h"
34#include "vce/vce_4_0_offset.h"
35#include "vce/vce_4_0_default.h"
36#include "vce/vce_4_0_sh_mask.h"
daad67b5 37#include "nbif/nbif_6_1_offset.h"
75199b8c 38#include "hdp/hdp_4_0_offset.h"
39#include "mmhub/mmhub_1_0_offset.h"
40#include "mmhub/mmhub_1_0_sh_mask.h"
41
42static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
43static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
44static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
45static int uvd_v7_0_start(struct amdgpu_device *adev);
46static void uvd_v7_0_stop(struct amdgpu_device *adev);
247ac951 47static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
48
49/**
50 * uvd_v7_0_ring_get_rptr - get read pointer
51 *
52 * @ring: amdgpu_ring pointer
53 *
54 * Returns the current hardware read pointer
55 */
56static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
57{
58 struct amdgpu_device *adev = ring->adev;
59
4ad5751a 60 return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
61}
62
63/**
64 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
65 *
66 * @ring: amdgpu_ring pointer
67 *
68 * Returns the current hardware enc read pointer
69 */
70static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
71{
72 struct amdgpu_device *adev = ring->adev;
73
74 if (ring == &adev->uvd.ring_enc[0])
4ad5751a 75 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
09bfb891 76 else
4ad5751a 77 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
78}
79
80/**
81 * uvd_v7_0_ring_get_wptr - get write pointer
82 *
83 * @ring: amdgpu_ring pointer
84 *
85 * Returns the current hardware write pointer
86 */
87static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
88{
89 struct amdgpu_device *adev = ring->adev;
90
4ad5751a 91 return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
92}
93
94/**
95 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
96 *
97 * @ring: amdgpu_ring pointer
98 *
99 * Returns the current hardware enc write pointer
100 */
101static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
102{
103 struct amdgpu_device *adev = ring->adev;
104
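	/* rings using a doorbell keep the last written wptr in the
	 * writeback page, so report that instead of reading the register
	 */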
105 if (ring->use_doorbell)
106 return adev->wb.wb[ring->wptr_offs];
107
09bfb891 108 if (ring == &adev->uvd.ring_enc[0])
4ad5751a 109 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
09bfb891 110 else
4ad5751a 111 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
112}
113
114/**
115 * uvd_v7_0_ring_set_wptr - set write pointer
116 *
117 * @ring: amdgpu_ring pointer
118 *
119 * Commits the write pointer to the hardware
120 */
121static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
122{
123 struct amdgpu_device *adev = ring->adev;
124
4ad5751a 125 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
126}
127
128/**
129 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
130 *
131 * @ring: amdgpu_ring pointer
132 *
133 * Commits the enc write pointer to the hardware
134 */
135static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
136{
137 struct amdgpu_device *adev = ring->adev;
138
139 if (ring->use_doorbell) {
140 /* XXX check if swapping is necessary on BE */
141 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
142 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
143 return;
144 }
145
09bfb891 146 if (ring == &adev->uvd.ring_enc[0])
4ad5751a 147 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
148 lower_32_bits(ring->wptr));
149 else
4ad5751a 150 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
151 lower_32_bits(ring->wptr));
152}
153
154/**
155 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
156 *
157 * @ring: the engine to test on
158 *
159 */
160static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
161{
162 struct amdgpu_device *adev = ring->adev;
163 uint32_t rptr = amdgpu_ring_get_rptr(ring);
164 unsigned i;
165 int r;
166
167 if (amdgpu_sriov_vf(adev))
168 return 0;
169
170 r = amdgpu_ring_alloc(ring, 16);
171 if (r) {
172 DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
173 ring->idx, r);
174 return r;
175 }
176 amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
177 amdgpu_ring_commit(ring);
178
179 for (i = 0; i < adev->usec_timeout; i++) {
180 if (amdgpu_ring_get_rptr(ring) != rptr)
181 break;
182 DRM_UDELAY(1);
183 }
184
185 if (i < adev->usec_timeout) {
9953b72f 186 DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
187 ring->idx, i);
188 } else {
189 DRM_ERROR("amdgpu: ring %d test failed\n",
190 ring->idx);
191 r = -ETIMEDOUT;
192 }
193
194 return r;
195}
196
197/**
198 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
199 *
200 * @adev: amdgpu_device pointer
201 * @ring: ring we should submit the msg to
202 * @handle: session handle to use
203 * @fence: optional fence to return
204 *
205 * Open up a stream for HW test
206 */
207static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
208 struct dma_fence **fence)
209{
210 const unsigned ib_size_dw = 16;
211 struct amdgpu_job *job;
212 struct amdgpu_ib *ib;
213 struct dma_fence *f = NULL;
214 uint64_t dummy;
215 int i, r;
216
217 r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
218 if (r)
219 return r;
220
221 ib = &job->ibs[0];
222 dummy = ib->gpu_addr + 1024;
223
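	/* the message is laid out as three blocks: 0x18 bytes of session
	 * info (the handle plus a dummy buffer address 1K into this IB),
	 * 0x14 bytes of task info and the 0x08 byte "op initialize" command
	 */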
224 ib->length_dw = 0;
225 ib->ptr[ib->length_dw++] = 0x00000018;
226 ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
227 ib->ptr[ib->length_dw++] = handle;
228 ib->ptr[ib->length_dw++] = 0x00000000;
229 ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
230 ib->ptr[ib->length_dw++] = dummy;
231
232 ib->ptr[ib->length_dw++] = 0x00000014;
233 ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
234 ib->ptr[ib->length_dw++] = 0x0000001c;
235 ib->ptr[ib->length_dw++] = 0x00000000;
236 ib->ptr[ib->length_dw++] = 0x00000000;
237
238 ib->ptr[ib->length_dw++] = 0x00000008;
239 ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
240
241 for (i = ib->length_dw; i < ib_size_dw; ++i)
242 ib->ptr[i] = 0x0;
243
244 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
245 job->fence = dma_fence_get(f);
246 if (r)
247 goto err;
248
249 amdgpu_job_free(job);
250 if (fence)
251 *fence = dma_fence_get(f);
252 dma_fence_put(f);
253 return 0;
254
255err:
256 amdgpu_job_free(job);
257 return r;
258}
259
260/**
261 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
262 *
263 * @adev: amdgpu_device pointer
264 * @ring: ring we should submit the msg to
265 * @handle: session handle to use
266 * @fence: optional fence to return
267 *
268 * Close up a stream for HW test or if userspace failed to do so
269 */
270int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
271 bool direct, struct dma_fence **fence)
272{
273 const unsigned ib_size_dw = 16;
274 struct amdgpu_job *job;
275 struct amdgpu_ib *ib;
276 struct dma_fence *f = NULL;
277 uint64_t dummy;
278 int i, r;
279
280 r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
281 if (r)
282 return r;
283
284 ib = &job->ibs[0];
285 dummy = ib->gpu_addr + 1024;
286
287 ib->length_dw = 0;
288 ib->ptr[ib->length_dw++] = 0x00000018;
289 ib->ptr[ib->length_dw++] = 0x00000001;
290 ib->ptr[ib->length_dw++] = handle;
291 ib->ptr[ib->length_dw++] = 0x00000000;
292 ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
293 ib->ptr[ib->length_dw++] = dummy;
294
295 ib->ptr[ib->length_dw++] = 0x00000014;
296 ib->ptr[ib->length_dw++] = 0x00000002;
297 ib->ptr[ib->length_dw++] = 0x0000001c;
298 ib->ptr[ib->length_dw++] = 0x00000000;
299 ib->ptr[ib->length_dw++] = 0x00000000;
300
301 ib->ptr[ib->length_dw++] = 0x00000008;
302 ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
303
304 for (i = ib->length_dw; i < ib_size_dw; ++i)
305 ib->ptr[i] = 0x0;
306
307 if (direct) {
308 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
309 job->fence = dma_fence_get(f);
310 if (r)
311 goto err;
312
313 amdgpu_job_free(job);
314 } else {
315 r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
316 AMDGPU_FENCE_OWNER_UNDEFINED, &f);
317 if (r)
318 goto err;
319 }
320
321 if (fence)
322 *fence = dma_fence_get(f);
323 dma_fence_put(f);
324 return 0;
325
326err:
327 amdgpu_job_free(job);
328 return r;
329}
330
331/**
332 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
333 *
334 * @ring: the engine to test on
335 *
336 */
337static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
338{
339 struct dma_fence *fence = NULL;
340 long r;
341
342 r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
343 if (r) {
344 DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
345 goto error;
346 }
347
348 r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence);
349 if (r) {
350 DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
351 goto error;
352 }
353
354 r = dma_fence_wait_timeout(fence, false, timeout);
355 if (r == 0) {
356 DRM_ERROR("amdgpu: IB test timed out.\n");
357 r = -ETIMEDOUT;
358 } else if (r < 0) {
359 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
360 } else {
9953b72f 361 DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
362 r = 0;
363 }
364error:
365 dma_fence_put(fence);
366 return r;
367}
368
369static int uvd_v7_0_early_init(void *handle)
370{
371 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
372
373 if (amdgpu_sriov_vf(adev))
374 adev->uvd.num_enc_rings = 1;
375 else
376 adev->uvd.num_enc_rings = 2;
377 uvd_v7_0_set_ring_funcs(adev);
378 uvd_v7_0_set_enc_ring_funcs(adev);
379 uvd_v7_0_set_irq_funcs(adev);
380
381 return 0;
382}
383
384static int uvd_v7_0_sw_init(void *handle)
385{
386 struct amdgpu_ring *ring;
1b1f42d8 387 struct drm_sched_rq *rq;
388 int i, r;
389 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
390
391 /* UVD TRAP */
392 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, 124, &adev->uvd.irq);
393 if (r)
394 return r;
395
396 /* UVD ENC TRAP */
397 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
398 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq);
399 if (r)
400 return r;
401 }
402
403 r = amdgpu_uvd_sw_init(adev);
404 if (r)
405 return r;
406
407 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
408 const struct common_firmware_header *hdr;
409 hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
410 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
411 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
412 adev->firmware.fw_size +=
413 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
414 DRM_INFO("PSP loading UVD firmware\n");
415 }
416
417 ring = &adev->uvd.ring_enc[0];
418 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
419 r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
b3eebe3d 420 rq, amdgpu_sched_jobs, NULL);
421 if (r) {
422 DRM_ERROR("Failed setting up UVD ENC run queue.\n");
423 return r;
424 }
425
426 r = amdgpu_uvd_resume(adev);
427 if (r)
428 return r;
429 if (!amdgpu_sriov_vf(adev)) {
430 ring = &adev->uvd.ring;
431 sprintf(ring->name, "uvd");
432 r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
433 if (r)
434 return r;
435 }
09bfb891 436
437 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
438 ring = &adev->uvd.ring_enc[i];
439 sprintf(ring->name, "uvd_enc%d", i);
440 if (amdgpu_sriov_vf(adev)) {
441 ring->use_doorbell = true;
442
			/* currently only use the first encoding ring for
			 * SR-IOV, so park the other rings at an unused
			 * doorbell location
			 */
446 if (i == 0)
447 ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
448 else
449 ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
beb2ced5 450 }
451 r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
452 if (r)
453 return r;
454 }
455
456 r = amdgpu_virt_alloc_mm_table(adev);
457 if (r)
458 return r;
459
460 return r;
461}
462
463static int uvd_v7_0_sw_fini(void *handle)
464{
465 int i, r;
466 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
467
468 amdgpu_virt_free_mm_table(adev);
469
470 r = amdgpu_uvd_suspend(adev);
471 if (r)
472 return r;
473
1b1f42d8 474 drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
475
476 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
477 amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
478
50237287 479 return amdgpu_uvd_sw_fini(adev);
480}
481
482/**
483 * uvd_v7_0_hw_init - start and test UVD block
484 *
485 * @adev: amdgpu_device pointer
486 *
487 * Initialize the hardware, boot up the VCPU and do some testing
488 */
489static int uvd_v7_0_hw_init(void *handle)
490{
491 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
492 struct amdgpu_ring *ring = &adev->uvd.ring;
493 uint32_t tmp;
494 int i, r;
495
496 if (amdgpu_sriov_vf(adev))
497 r = uvd_v7_0_sriov_start(adev);
498 else
499 r = uvd_v7_0_start(adev);
500 if (r)
501 goto done;
502
503 if (!amdgpu_sriov_vf(adev)) {
504 ring->ready = true;
505 r = amdgpu_ring_test_ring(ring);
506 if (r) {
507 ring->ready = false;
508 goto done;
509 }
09bfb891 510
511 r = amdgpu_ring_alloc(ring, 10);
512 if (r) {
513 DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
514 goto done;
515 }
09bfb891 516
517 tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
518 mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
519 amdgpu_ring_write(ring, tmp);
520 amdgpu_ring_write(ring, 0xFFFFF);
09bfb891 521
522 tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
523 mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
524 amdgpu_ring_write(ring, tmp);
525 amdgpu_ring_write(ring, 0xFFFFF);
09bfb891 526
527 tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
528 mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
529 amdgpu_ring_write(ring, tmp);
530 amdgpu_ring_write(ring, 0xFFFFF);
09bfb891 531
532 /* Clear timeout status bits */
533 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
534 mmUVD_SEMA_TIMEOUT_STATUS), 0));
535 amdgpu_ring_write(ring, 0x8);
09bfb891 536
537 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
538 mmUVD_SEMA_CNTL), 0));
539 amdgpu_ring_write(ring, 3);
09bfb891 540
541 amdgpu_ring_commit(ring);
542 }
543
544 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
545 ring = &adev->uvd.ring_enc[i];
546 ring->ready = true;
547 r = amdgpu_ring_test_ring(ring);
548 if (r) {
549 ring->ready = false;
550 goto done;
551 }
552 }
553
554done:
555 if (!r)
556 DRM_INFO("UVD and UVD ENC initialized successfully.\n");
557
558 return r;
559}
560
561/**
562 * uvd_v7_0_hw_fini - stop the hardware block
563 *
564 * @adev: amdgpu_device pointer
565 *
566 * Stop the UVD block, mark ring as not ready any more
567 */
568static int uvd_v7_0_hw_fini(void *handle)
569{
570 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
571 struct amdgpu_ring *ring = &adev->uvd.ring;
572
573 if (!amdgpu_sriov_vf(adev))
574 uvd_v7_0_stop(adev);
575 else {
576 /* full access mode, so don't touch any UVD register */
577 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
578 }
579
580 ring->ready = false;
581
582 return 0;
583}
584
585static int uvd_v7_0_suspend(void *handle)
586{
587 int r;
588 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
589
590 r = uvd_v7_0_hw_fini(adev);
591 if (r)
592 return r;
593
4a0144bf 594 return amdgpu_uvd_suspend(adev);
595}
596
597static int uvd_v7_0_resume(void *handle)
598{
599 int r;
600 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
601
602 r = amdgpu_uvd_resume(adev);
603 if (r)
604 return r;
605
50237287 606 return uvd_v7_0_hw_init(adev);
607}
608
609/**
610 * uvd_v7_0_mc_resume - memory controller programming
611 *
612 * @adev: amdgpu_device pointer
613 *
 * Let the UVD memory controller know its offsets
615 */
616static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
617{
c1fe75c9 618 uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
619 uint32_t offset;
620
621 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
4ad5751a 622 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
09bfb891 623 lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
4ad5751a 624 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
625 upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
626 offset = 0;
627 } else {
4ad5751a 628 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
09bfb891 629 lower_32_bits(adev->uvd.gpu_addr));
4ad5751a 630 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
631 upper_32_bits(adev->uvd.gpu_addr));
632 offset = size;
633 }
634
4ad5751a 635 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
09bfb891 636 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
4ad5751a 637 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
09bfb891 638
4ad5751a 639 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
09bfb891 640 lower_32_bits(adev->uvd.gpu_addr + offset));
4ad5751a 641 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
09bfb891 642 upper_32_bits(adev->uvd.gpu_addr + offset));
643 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
644 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
09bfb891 645
4ad5751a 646 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
09bfb891 647 lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
4ad5751a 648 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
09bfb891 649 upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
650 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
651 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
652 AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
653
4ad5751a 654 WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
09bfb891 655 adev->gfx.config.gb_addr_config);
4ad5751a 656 WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
09bfb891 657 adev->gfx.config.gb_addr_config);
4ad5751a 658 WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
659 adev->gfx.config.gb_addr_config);
660
4ad5751a 661 WREG32_SOC15(UVD, 0, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
662}
663
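/**
 * uvd_v7_0_mmsch_start - let the MMSCH program UVD on behalf of the VF
 *
 * @adev: amdgpu_device pointer
 * @table: descriptor table with the register writes the MMSCH should replay
 *
 * Hand the descriptor table to the MM scheduler firmware and wait for its
 * mailbox acknowledgement.
 */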
664static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
665 struct amdgpu_mm_table *table)
666{
667 uint32_t data = 0, loop;
668 uint64_t addr = table->gpu_addr;
669 struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
670 uint32_t size;
671
672 size = header->header_size + header->vce_table_size + header->uvd_table_size;
673
674 /* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
675 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
676 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
677
678 /* 2, update vmid of descriptor */
4ad5751a 679 data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
680 data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
681 data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
4ad5751a 682 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);
683
684 /* 3, notify mmsch about the size of this descriptor */
4ad5751a 685 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);
686
687 /* 4, set resp to zero */
4ad5751a 688 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
247ac951 689
690 WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0);
691 adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0;
692 adev->uvd.ring_enc[0].wptr = 0;
693 adev->uvd.ring_enc[0].wptr_old = 0;
694
247ac951 695 /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
4ad5751a 696 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
247ac951 697
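	/* poll the response mailbox for the 0x10000002 ack, giving the
	 * MMSCH up to ~10ms (1000 * 10us) to consume the descriptor
	 */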
4ad5751a 698 data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
699 loop = 1000;
700 while ((data & 0x10000002) != 0x10000002) {
701 udelay(10);
4ad5751a 702 data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
703 loop--;
704 if (!loop)
705 break;
706 }
707
708 if (!loop) {
709 dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
710 return -EBUSY;
711 }
712
713 return 0;
714}
715
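/**
 * uvd_v7_0_sriov_start - start UVD under SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Record the MC and ring programming as an MMSCH descriptor table instead of
 * writing the registers directly, then let the MMSCH apply it.
 */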
716static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
717{
718 struct amdgpu_ring *ring;
719 uint32_t offset, size, tmp;
720 uint32_t table_size = 0;
721 struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
722 struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
723 struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
724 struct mmsch_v1_0_cmd_end end = { {0} };
725 uint32_t *init_table = adev->virt.mm_table.cpu_addr;
726 struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
727
728 direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
729 direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
730 direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
731 end.cmd_header.command_type = MMSCH_COMMAND__END;
732
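	/* the init table is only built once; subsequent calls simply
	 * re-submit the descriptor already sitting in the mm_table buffer
	 */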
733 if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
734 header->version = MMSCH_VERSION;
735 header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
736
737 if (header->vce_table_offset == 0 && header->vce_table_size == 0)
738 header->uvd_table_offset = header->header_size;
739 else
740 header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;
741
742 init_table += header->uvd_table_offset;
743
744 ring = &adev->uvd.ring;
81fe3f35 745 ring->wptr = 0;
746 size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
747
748 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
749 0xFFFFFFFF, 0x00000004);
750 /* mc resume*/
751 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
752 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
753 lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
754 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
755 upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
756 offset = 0;
757 } else {
758 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
759 lower_32_bits(adev->uvd.gpu_addr));
760 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
761 upper_32_bits(adev->uvd.gpu_addr));
762 offset = size;
763 }
764
765 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
766 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
767 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);
768
769 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
770 lower_32_bits(adev->uvd.gpu_addr + offset));
771 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
772 upper_32_bits(adev->uvd.gpu_addr + offset));
773 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
774 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
775
776 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
777 lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
778 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
779 upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
780 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
781 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
782 AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
783
784 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
785 /* mc resume end*/
786
787 /* disable clock gating */
788 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL),
789 ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
790
		/* disable interrupt */
792 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
793 ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
794
795 /* stall UMC and register bus before resetting VCPU */
796 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
797 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
798 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
799
800 /* put LMI, VCPU, RBC etc... into reset */
801 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
802 (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
803 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
804 UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
805 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
806 UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
807 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
808 UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
809 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
810
811 /* initialize UVD memory controller */
812 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL),
813 (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
814 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
815 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
816 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
817 UVD_LMI_CTRL__REQ_MODE_MASK |
818 0x00100000L));
819
820 /* take all subblocks out of reset, except VCPU */
821 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
822 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
823
824 /* enable VCPU clock */
825 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
826 UVD_VCPU_CNTL__CLK_EN_MASK);
827
828 /* enable master interrupt */
829 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
830 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
831 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
832
833 /* clear the bit 4 of UVD_STATUS */
834 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
835 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
836
837 /* force RBC into idle state */
838 size = order_base_2(ring->ring_size);
839 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
247ac951 840 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
841 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
842
247ac951 843 ring = &adev->uvd.ring_enc[0];
81fe3f35 844 ring->wptr = 0;
845 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
846 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
847 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
848
849 /* boot up the VCPU */
850 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
851
852 /* enable UMC */
853 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
854 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
855
856 MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
857
858 /* add end packet */
859 memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
860 table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
861 header->uvd_table_size = table_size;
862
247ac951 863 }
257deb8c 864 return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
865}
866
867/**
868 * uvd_v7_0_start - start UVD block
869 *
870 * @adev: amdgpu_device pointer
871 *
872 * Setup and start the UVD block
873 */
874static int uvd_v7_0_start(struct amdgpu_device *adev)
875{
876 struct amdgpu_ring *ring = &adev->uvd.ring;
877 uint32_t rb_bufsz, tmp;
878 uint32_t lmi_swap_cntl;
879 uint32_t mp_swap_cntl;
880 int i, j, r;
881
882 /* disable DPG */
883 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
884 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
885
886 /* disable byte swapping */
887 lmi_swap_cntl = 0;
888 mp_swap_cntl = 0;
889
890 uvd_v7_0_mc_resume(adev);
891
892 /* disable clock gating */
893 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), 0,
894 ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
895
	/* disable interrupt */
897 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
898 ~UVD_MASTINT_EN__VCPU_EN_MASK);
899
900 /* stall UMC and register bus before resetting VCPU */
901 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
902 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
903 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
904 mdelay(1);
905
906 /* put LMI, VCPU, RBC etc... into reset */
4ad5751a 907 WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
908 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
909 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
910 UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
911 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
912 UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
913 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
914 UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
915 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
916 mdelay(5);
917
918 /* initialize UVD memory controller */
4ad5751a 919 WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL,
920 (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
921 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
922 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
923 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
924 UVD_LMI_CTRL__REQ_MODE_MASK |
925 0x00100000L);
926
927#ifdef __BIG_ENDIAN
928 /* swap (8 in 32) RB and IB */
929 lmi_swap_cntl = 0xa;
930 mp_swap_cntl = 0;
931#endif
932 WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
933 WREG32_SOC15(UVD, 0, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
09bfb891 934
935 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040);
936 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0);
937 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040);
938 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0);
939 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0);
940 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88);
941
942 /* take all subblocks out of reset, except VCPU */
4ad5751a 943 WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
944 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
945 mdelay(5);
946
947 /* enable VCPU clock */
4ad5751a 948 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL,
949 UVD_VCPU_CNTL__CLK_EN_MASK);
950
951 /* enable UMC */
952 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
953 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
954
955 /* boot up the VCPU */
4ad5751a 956 WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0);
957 mdelay(10);
958
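	/* wait for the VCPU to report ready: up to 10 attempts, each polling
	 * UVD_STATUS for up to a second before retrying with a VCPU soft reset
	 */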
959 for (i = 0; i < 10; ++i) {
960 uint32_t status;
961
962 for (j = 0; j < 100; ++j) {
4ad5751a 963 status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
964 if (status & 2)
965 break;
966 mdelay(10);
967 }
968 r = 0;
969 if (status & 2)
970 break;
971
972 DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
973 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
974 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
975 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
976 mdelay(10);
977 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
978 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
979 mdelay(10);
980 r = -1;
981 }
982
983 if (r) {
984 DRM_ERROR("UVD not responding, giving up!!!\n");
985 return r;
986 }
987 /* enable master interrupt */
988 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
989 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
990 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
991
992 /* clear the bit 4 of UVD_STATUS */
993 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
994 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
995
996 /* force RBC into idle state */
997 rb_bufsz = order_base_2(ring->ring_size);
998 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
999 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1000 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1001 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
1002 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1003 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
4ad5751a 1004 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
1005
1006 /* set the write pointer delay */
4ad5751a 1007 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
1008
1009 /* set the wb address */
4ad5751a 1010 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
1011 (upper_32_bits(ring->gpu_addr) >> 2));
1012
	/* program the RB_BASE for the ring buffer */
4ad5751a 1014 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
09bfb891 1015 lower_32_bits(ring->gpu_addr));
4ad5751a 1016 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1017 upper_32_bits(ring->gpu_addr));
1018
1019 /* Initialize the ring buffer's read and write pointers */
4ad5751a 1020 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
09bfb891 1021
1022 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
1023 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1024 lower_32_bits(ring->wptr));
1025
1026 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
1027 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1028
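	/* program both encode rings: read/write pointers, ring base address
	 * and size (the RB_SIZE registers take the size in dwords)
	 */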
1029 ring = &adev->uvd.ring_enc[0];
1030 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1031 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1032 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
1033 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1034 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
1035
1036 ring = &adev->uvd.ring_enc[1];
1037 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1038 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1039 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1040 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1041 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
1042
1043 return 0;
1044}
1045
1046/**
1047 * uvd_v7_0_stop - stop UVD block
1048 *
1049 * @adev: amdgpu_device pointer
1050 *
1051 * stop the UVD block
1052 */
1053static void uvd_v7_0_stop(struct amdgpu_device *adev)
1054{
1055 /* force RBC into idle state */
4ad5751a 1056 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);
1057
1058 /* Stall UMC and register bus before resetting VCPU */
1059 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
1060 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
1061 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1062 mdelay(1);
1063
1064 /* put VCPU into reset */
4ad5751a 1065 WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
1066 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1067 mdelay(5);
1068
1069 /* disable VCPU clock */
4ad5751a 1070 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0);
1071
1072 /* Unstall UMC and register bus */
1073 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
1074 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1075}
1076
1077/**
 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
1079 *
1080 * @ring: amdgpu_ring pointer
1081 * @fence: fence to emit
1082 *
1083 * Write a fence and a trap command to the ring.
1084 */
1085static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1086 unsigned flags)
1087{
1088 struct amdgpu_device *adev = ring->adev;
1089
1090 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1091
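	/* the fence value goes into UVD_CONTEXT_ID and the 64-bit address is
	 * handed to the VCPU through the GPCOM DATA0/DATA1 registers; GPCOM
	 * command 0 then emits the fence write and command 2 the trap
	 */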
1092 amdgpu_ring_write(ring,
1093 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
1094 amdgpu_ring_write(ring, seq);
1095 amdgpu_ring_write(ring,
1096 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1097 amdgpu_ring_write(ring, addr & 0xffffffff);
1098 amdgpu_ring_write(ring,
1099 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1100 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1101 amdgpu_ring_write(ring,
1102 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1103 amdgpu_ring_write(ring, 0);
1104
1105 amdgpu_ring_write(ring,
1106 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1107 amdgpu_ring_write(ring, 0);
1108 amdgpu_ring_write(ring,
1109 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1110 amdgpu_ring_write(ring, 0);
1111 amdgpu_ring_write(ring,
1112 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1113 amdgpu_ring_write(ring, 2);
1114}
1115
1116/**
1117 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
1118 *
1119 * @ring: amdgpu_ring pointer
1120 * @fence: fence to emit
1121 *
 * Write an enc fence and a trap command to the ring.
1123 */
1124static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1125 u64 seq, unsigned flags)
1126{
cd29253f 1127
1128 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1129
1130 amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
1131 amdgpu_ring_write(ring, addr);
1132 amdgpu_ring_write(ring, upper_32_bits(addr));
1133 amdgpu_ring_write(ring, seq);
1134 amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
1135}
1136
1137/**
1138 * uvd_v7_0_ring_emit_hdp_flush - emit an hdp flush
1139 *
1140 * @ring: amdgpu_ring pointer
1141 *
1142 * Emits an hdp flush.
1143 */
1144static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1145{
1146 struct amdgpu_device *adev = ring->adev;
1147
1148 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(NBIF, 0,
1149 mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0));
1150 amdgpu_ring_write(ring, 0);
1151}
1152
1153/**
 * uvd_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate
1155 *
1156 * @ring: amdgpu_ring pointer
1157 *
1158 * Emits an hdp invalidate.
1159 */
1160static void uvd_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
1161{
1162 struct amdgpu_device *adev = ring->adev;
1163
6e2e216f 1164 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
1165 amdgpu_ring_write(ring, 1);
1166}
1167
1168/**
1169 * uvd_v7_0_ring_test_ring - register write test
1170 *
1171 * @ring: amdgpu_ring pointer
1172 *
1173 * Test if we can successfully write to the context register
1174 */
1175static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
1176{
1177 struct amdgpu_device *adev = ring->adev;
1178 uint32_t tmp = 0;
1179 unsigned i;
1180 int r;
1181
4ad5751a 1182 WREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
1183 r = amdgpu_ring_alloc(ring, 3);
1184 if (r) {
1185 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
1186 ring->idx, r);
1187 return r;
1188 }
1189 amdgpu_ring_write(ring,
1190 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
1191 amdgpu_ring_write(ring, 0xDEADBEEF);
1192 amdgpu_ring_commit(ring);
1193 for (i = 0; i < adev->usec_timeout; i++) {
4ad5751a 1194 tmp = RREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID);
1195 if (tmp == 0xDEADBEEF)
1196 break;
1197 DRM_UDELAY(1);
1198 }
1199
1200 if (i < adev->usec_timeout) {
9953b72f 1201 DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
1202 ring->idx, i);
1203 } else {
1204 DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
1205 ring->idx, tmp);
1206 r = -EINVAL;
1207 }
1208 return r;
1209}
1210
1211/**
1212 * uvd_v7_0_ring_emit_ib - execute indirect buffer
1213 *
1214 * @ring: amdgpu_ring pointer
1215 * @ib: indirect buffer to execute
1216 *
1217 * Write ring commands to execute the indirect buffer
1218 */
1219static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
1220 struct amdgpu_ib *ib,
c4f46f22 1221 unsigned vmid, bool ctx_switch)
09bfb891 1222{
1223 struct amdgpu_device *adev = ring->adev;
1224
1225 amdgpu_ring_write(ring,
1226 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
c4f46f22 1227 amdgpu_ring_write(ring, vmid);
1228
1229 amdgpu_ring_write(ring,
1230 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1231 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1232 amdgpu_ring_write(ring,
1233 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1234 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1235 amdgpu_ring_write(ring,
1236 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
1237 amdgpu_ring_write(ring, ib->length_dw);
1238}
1239
1240/**
1241 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
1242 *
1243 * @ring: amdgpu_ring pointer
1244 * @ib: indirect buffer to execute
1245 *
1246 * Write enc ring commands to execute the indirect buffer
1247 */
1248static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
c4f46f22 1249 struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
1250{
1251 amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
c4f46f22 1252 amdgpu_ring_write(ring, vmid);
09bfb891
LL
1253 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1254 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1255 amdgpu_ring_write(ring, ib->length_dw);
1256}
1257
1258static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
1259 uint32_t reg, uint32_t val)
09bfb891 1260{
1261 struct amdgpu_device *adev = ring->adev;
1262
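	/* register writes on the decode ring go through the VCPU: the byte
	 * offset of the register is placed in GPCOM DATA0, the value in
	 * DATA1, and GPCOM command 8 triggers the write
	 */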
1263 amdgpu_ring_write(ring,
1264 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
b6cb3b5c 1265 amdgpu_ring_write(ring, reg << 2);
1266 amdgpu_ring_write(ring,
1267 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
b6cb3b5c 1268 amdgpu_ring_write(ring, val);
1269 amdgpu_ring_write(ring,
1270 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1271 amdgpu_ring_write(ring, 8);
1272}
1273
1274static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
1275 uint32_t data0, uint32_t data1, uint32_t mask)
1276{
1277 struct amdgpu_device *adev = ring->adev;
1278
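	/* like uvd_v7_0_ring_emit_wreg(), but with a mask loaded into
	 * GP_SCRATCH8; GPCOM command 12 waits until the register value
	 * masked with it matches data1
	 */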
1279 amdgpu_ring_write(ring,
1280 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1281 amdgpu_ring_write(ring, data0);
1282 amdgpu_ring_write(ring,
1283 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1284 amdgpu_ring_write(ring, data1);
1285 amdgpu_ring_write(ring,
1286 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
1287 amdgpu_ring_write(ring, mask);
1288 amdgpu_ring_write(ring,
1289 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1290 amdgpu_ring_write(ring, 12);
1291}
1292
1293static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1294 unsigned vmid, unsigned pasid,
1295 uint64_t pd_addr)
09bfb891 1296{
2e819849 1297 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
132f34e4 1298 uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
3de676d8 1299 uint64_t flags = AMDGPU_PTE_VALID;
4789c463 1300 unsigned eng = ring->vm_inv_eng;
3de676d8 1301 uint32_t data0, data1, mask;
09bfb891 1302
132f34e4 1303 amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
3de676d8 1304 pd_addr |= flags;
09bfb891 1305
c4f46f22 1306 data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
2e819849 1307 data1 = upper_32_bits(pd_addr);
b6cb3b5c 1308 uvd_v7_0_ring_emit_wreg(ring, data0, data1);
2e819849 1309
c4f46f22 1310 data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
2e819849 1311 data1 = lower_32_bits(pd_addr);
b6cb3b5c 1312 uvd_v7_0_ring_emit_wreg(ring, data0, data1);
2e819849 1313
c4f46f22 1314 data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
1315 data1 = lower_32_bits(pd_addr);
1316 mask = 0xffffffff;
1317 uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
1318
1319 /* flush TLB */
1320 data0 = (hub->vm_inv_eng0_req + eng) << 2;
1321 data1 = req;
b6cb3b5c 1322 uvd_v7_0_ring_emit_wreg(ring, data0, data1);
1323
1324 /* wait for flush */
1325 data0 = (hub->vm_inv_eng0_ack + eng) << 2;
1326 data1 = 1 << vmid;
1327 mask = 1 << vmid;
2e819849 1328 uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
1329}
1330
1331static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1332{
1333 int i;
1334 struct amdgpu_device *adev = ring->adev;
1335
1336 for (i = 0; i < count; i++)
1337 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
1338
1339}
1340
1341static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1342{
1343 amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1344}
1345
1346static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1347 unsigned int vmid, unsigned pasid,
1348 uint64_t pd_addr)
09bfb891 1349{
2e819849 1350 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
132f34e4 1351 uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
3de676d8 1352 uint64_t flags = AMDGPU_PTE_VALID;
4789c463 1353 unsigned eng = ring->vm_inv_eng;
09bfb891 1354
132f34e4 1355 amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
3de676d8 1356 pd_addr |= flags;
09bfb891 1357
2e819849 1358 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
c4f46f22 1359 amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
1360 amdgpu_ring_write(ring, upper_32_bits(pd_addr));
1361
1362 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
c4f46f22 1363 amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
1364 amdgpu_ring_write(ring, lower_32_bits(pd_addr));
1365
1366 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
c4f46f22 1367 amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
1368 amdgpu_ring_write(ring, 0xffffffff);
1369 amdgpu_ring_write(ring, lower_32_bits(pd_addr));
1370
1371 /* flush TLB */
1372 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1373 amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
1374 amdgpu_ring_write(ring, req);
1375
1376 /* wait for flush */
1377 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1378 amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
1379 amdgpu_ring_write(ring, 1 << vmid);
1380 amdgpu_ring_write(ring, 1 << vmid);
1381}
1382
1383static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1384 uint32_t reg, uint32_t val)
1385{
1386 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1387 amdgpu_ring_write(ring, reg << 2);
1388 amdgpu_ring_write(ring, val);
1389}
1390
1391#if 0
1392static bool uvd_v7_0_is_idle(void *handle)
1393{
1394 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1395
1396 return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
1397}
1398
1399static int uvd_v7_0_wait_for_idle(void *handle)
1400{
1401 unsigned i;
1402 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1403
1404 for (i = 0; i < adev->usec_timeout; i++) {
1405 if (uvd_v7_0_is_idle(handle))
1406 return 0;
1407 }
1408 return -ETIMEDOUT;
1409}
1410
1411#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
1412static bool uvd_v7_0_check_soft_reset(void *handle)
1413{
1414 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1415 u32 srbm_soft_reset = 0;
1416 u32 tmp = RREG32(mmSRBM_STATUS);
1417
1418 if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
1419 REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
1420 (RREG32_SOC15(UVD, 0, mmUVD_STATUS) &
1421 AMDGPU_UVD_STATUS_BUSY_MASK))
1422 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1423 SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
1424
1425 if (srbm_soft_reset) {
1426 adev->uvd.srbm_soft_reset = srbm_soft_reset;
1427 return true;
1428 } else {
1429 adev->uvd.srbm_soft_reset = 0;
1430 return false;
1431 }
1432}
1433
1434static int uvd_v7_0_pre_soft_reset(void *handle)
1435{
1436 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1437
1438 if (!adev->uvd.srbm_soft_reset)
1439 return 0;
1440
1441 uvd_v7_0_stop(adev);
1442 return 0;
1443}
1444
1445static int uvd_v7_0_soft_reset(void *handle)
1446{
1447 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1448 u32 srbm_soft_reset;
1449
1450 if (!adev->uvd.srbm_soft_reset)
1451 return 0;
1452 srbm_soft_reset = adev->uvd.srbm_soft_reset;
1453
1454 if (srbm_soft_reset) {
1455 u32 tmp;
1456
1457 tmp = RREG32(mmSRBM_SOFT_RESET);
1458 tmp |= srbm_soft_reset;
1459 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1460 WREG32(mmSRBM_SOFT_RESET, tmp);
1461 tmp = RREG32(mmSRBM_SOFT_RESET);
1462
1463 udelay(50);
1464
1465 tmp &= ~srbm_soft_reset;
1466 WREG32(mmSRBM_SOFT_RESET, tmp);
1467 tmp = RREG32(mmSRBM_SOFT_RESET);
1468
1469 /* Wait a little for things to settle down */
1470 udelay(50);
1471 }
1472
1473 return 0;
1474}
1475
1476static int uvd_v7_0_post_soft_reset(void *handle)
1477{
1478 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1479
1480 if (!adev->uvd.srbm_soft_reset)
1481 return 0;
1482
1483 mdelay(5);
1484
1485 return uvd_v7_0_start(adev);
1486}
1487#endif
1488
1489static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
1490 struct amdgpu_irq_src *source,
1491 unsigned type,
1492 enum amdgpu_interrupt_state state)
1493{
1494 // TODO
1495 return 0;
1496}
1497
1498static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
1499 struct amdgpu_irq_src *source,
1500 struct amdgpu_iv_entry *entry)
1501{
1502 DRM_DEBUG("IH: UVD TRAP\n");
1503 switch (entry->src_id) {
1504 case 124:
1505 amdgpu_fence_process(&adev->uvd.ring);
1506 break;
1507 case 119:
1508 amdgpu_fence_process(&adev->uvd.ring_enc[0]);
1509 break;
1510 case 120:
1511 if (!amdgpu_sriov_vf(adev))
1512 amdgpu_fence_process(&adev->uvd.ring_enc[1]);
1513 break;
1514 default:
1515 DRM_ERROR("Unhandled interrupt: %d %d\n",
1516 entry->src_id, entry->src_data[0]);
1517 break;
1518 }
1519
1520 return 0;
1521}
1522
1523#if 0
1524static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
1525{
1526 uint32_t data, data1, data2, suvd_flags;
1527
1528 data = RREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL);
1529 data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);
1530 data2 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL);
1531
1532 data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1533 UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1534
1535 suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1536 UVD_SUVD_CGC_GATE__SIT_MASK |
1537 UVD_SUVD_CGC_GATE__SMP_MASK |
1538 UVD_SUVD_CGC_GATE__SCM_MASK |
1539 UVD_SUVD_CGC_GATE__SDB_MASK;
1540
1541 data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1542 (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1543 (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1544
1545 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1546 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1547 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1548 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1549 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1550 UVD_CGC_CTRL__SYS_MODE_MASK |
1551 UVD_CGC_CTRL__UDEC_MODE_MASK |
1552 UVD_CGC_CTRL__MPEG2_MODE_MASK |
1553 UVD_CGC_CTRL__REGS_MODE_MASK |
1554 UVD_CGC_CTRL__RBC_MODE_MASK |
1555 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1556 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1557 UVD_CGC_CTRL__IDCT_MODE_MASK |
1558 UVD_CGC_CTRL__MPRD_MODE_MASK |
1559 UVD_CGC_CTRL__MPC_MODE_MASK |
1560 UVD_CGC_CTRL__LBSI_MODE_MASK |
1561 UVD_CGC_CTRL__LRBBM_MODE_MASK |
1562 UVD_CGC_CTRL__WCB_MODE_MASK |
1563 UVD_CGC_CTRL__VCPU_MODE_MASK |
1564 UVD_CGC_CTRL__JPEG_MODE_MASK |
1565 UVD_CGC_CTRL__JPEG2_MODE_MASK |
1566 UVD_CGC_CTRL__SCPU_MODE_MASK);
1567 data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1568 UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1569 UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1570 UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1571 UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1572 data1 |= suvd_flags;
1573
1574 WREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL, data);
1575 WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, 0);
1576 WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
1577 WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL, data2);
1578}
1579
1580static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
1581{
1582 uint32_t data, data1, cgc_flags, suvd_flags;
1583
1584 data = RREG32_SOC15(UVD, 0, mmUVD_CGC_GATE);
1585 data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);
1586
1587 cgc_flags = UVD_CGC_GATE__SYS_MASK |
1588 UVD_CGC_GATE__UDEC_MASK |
1589 UVD_CGC_GATE__MPEG2_MASK |
1590 UVD_CGC_GATE__RBC_MASK |
1591 UVD_CGC_GATE__LMI_MC_MASK |
1592 UVD_CGC_GATE__IDCT_MASK |
1593 UVD_CGC_GATE__MPRD_MASK |
1594 UVD_CGC_GATE__MPC_MASK |
1595 UVD_CGC_GATE__LBSI_MASK |
1596 UVD_CGC_GATE__LRBBM_MASK |
1597 UVD_CGC_GATE__UDEC_RE_MASK |
1598 UVD_CGC_GATE__UDEC_CM_MASK |
1599 UVD_CGC_GATE__UDEC_IT_MASK |
1600 UVD_CGC_GATE__UDEC_DB_MASK |
1601 UVD_CGC_GATE__UDEC_MP_MASK |
1602 UVD_CGC_GATE__WCB_MASK |
1603 UVD_CGC_GATE__VCPU_MASK |
1604 UVD_CGC_GATE__SCPU_MASK |
1605 UVD_CGC_GATE__JPEG_MASK |
1606 UVD_CGC_GATE__JPEG2_MASK;
1607
1608 suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1609 UVD_SUVD_CGC_GATE__SIT_MASK |
1610 UVD_SUVD_CGC_GATE__SMP_MASK |
1611 UVD_SUVD_CGC_GATE__SCM_MASK |
1612 UVD_SUVD_CGC_GATE__SDB_MASK;
1613
1614 data |= cgc_flags;
1615 data1 |= suvd_flags;
1616
1617 WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, data);
1618 WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
1619}
1620
1621static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
1622{
1623 u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
1624
1625 if (enable)
1626 tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1627 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1628 else
1629 tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1630 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1631
1632 WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
1633}
1634
1635
1636static int uvd_v7_0_set_clockgating_state(void *handle,
1637 enum amd_clockgating_state state)
1638{
1639 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1640 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
1641
1642 uvd_v7_0_set_bypass_mode(adev, enable);
1643
1644 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
1645 return 0;
1646
1647 if (enable) {
1648 /* disable HW gating and enable Sw gating */
1649 uvd_v7_0_set_sw_clock_gating(adev);
1650 } else {
1651 /* wait for STATUS to clear */
1652 if (uvd_v7_0_wait_for_idle(handle))
1653 return -EBUSY;
1654
1655 /* enable HW gates because UVD is idle */
1656 /* uvd_v7_0_set_hw_clock_gating(adev); */
1657 }
1658
1659 return 0;
1660}
1661
1662static int uvd_v7_0_set_powergating_state(void *handle,
1663 enum amd_powergating_state state)
1664{
1665 /* This doesn't actually powergate the UVD block.
1666 * That's done in the dpm code via the SMC. This
1667 * just re-inits the block as necessary. The actual
1668 * gating still happens in the dpm code. We should
1669 * revisit this when there is a cleaner line between
1670 * the smc and the hw blocks
1671 */
1672 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1673
1674 if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1675 return 0;
1676
4ad5751a 1677 WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
1678
1679 if (state == AMD_PG_STATE_GATE) {
1680 uvd_v7_0_stop(adev);
1681 return 0;
1682 } else {
1683 return uvd_v7_0_start(adev);
1684 }
1685}
1686#endif
1687
1688static int uvd_v7_0_set_clockgating_state(void *handle,
1689 enum amd_clockgating_state state)
1690{
1691 /* needed for driver unload*/
1692 return 0;
1693}
1694
1695const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
1696 .name = "uvd_v7_0",
1697 .early_init = uvd_v7_0_early_init,
1698 .late_init = NULL,
1699 .sw_init = uvd_v7_0_sw_init,
1700 .sw_fini = uvd_v7_0_sw_fini,
1701 .hw_init = uvd_v7_0_hw_init,
1702 .hw_fini = uvd_v7_0_hw_fini,
1703 .suspend = uvd_v7_0_suspend,
1704 .resume = uvd_v7_0_resume,
1705 .is_idle = NULL /* uvd_v7_0_is_idle */,
1706 .wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
1707 .check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
1708 .pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
1709 .soft_reset = NULL /* uvd_v7_0_soft_reset */,
1710 .post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
1711 .set_clockgating_state = uvd_v7_0_set_clockgating_state,
1712 .set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
1713};
1714
1715static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1716 .type = AMDGPU_RING_TYPE_UVD,
1717 .align_mask = 0xf,
946a4d5b 1718 .nop = PACKET0(0x81ff, 0),
09bfb891 1719 .support_64bit_ptrs = false,
0eeb68b3 1720 .vmhub = AMDGPU_MMHUB,
1721 .get_rptr = uvd_v7_0_ring_get_rptr,
1722 .get_wptr = uvd_v7_0_ring_get_wptr,
1723 .set_wptr = uvd_v7_0_ring_set_wptr,
1724 .emit_frame_size =
1725 2 + /* uvd_v7_0_ring_emit_hdp_flush */
1726 2 + /* uvd_v7_0_ring_emit_hdp_invalidate */
2e819849 1727 34 + /* uvd_v7_0_ring_emit_vm_flush */
1728 14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
1729 .emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
1730 .emit_ib = uvd_v7_0_ring_emit_ib,
1731 .emit_fence = uvd_v7_0_ring_emit_fence,
1732 .emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
1733 .emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
1734 .emit_hdp_invalidate = uvd_v7_0_ring_emit_hdp_invalidate,
1735 .test_ring = uvd_v7_0_ring_test_ring,
1736 .test_ib = amdgpu_uvd_ring_test_ib,
946a4d5b 1737 .insert_nop = uvd_v7_0_ring_insert_nop,
1738 .pad_ib = amdgpu_ring_generic_pad_ib,
1739 .begin_use = amdgpu_uvd_ring_begin_use,
1740 .end_use = amdgpu_uvd_ring_end_use,
b6cb3b5c 1741 .emit_wreg = uvd_v7_0_ring_emit_wreg,
1742};
1743
1744static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1745 .type = AMDGPU_RING_TYPE_UVD_ENC,
1746 .align_mask = 0x3f,
1747 .nop = HEVC_ENC_CMD_NO_OP,
1748 .support_64bit_ptrs = false,
0eeb68b3 1749 .vmhub = AMDGPU_MMHUB,
1750 .get_rptr = uvd_v7_0_enc_ring_get_rptr,
1751 .get_wptr = uvd_v7_0_enc_ring_get_wptr,
1752 .set_wptr = uvd_v7_0_enc_ring_set_wptr,
1753 .emit_frame_size =
2e819849 1754 17 + /* uvd_v7_0_enc_ring_emit_vm_flush */
1755 5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1756 1, /* uvd_v7_0_enc_ring_insert_end */
1757 .emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
1758 .emit_ib = uvd_v7_0_enc_ring_emit_ib,
1759 .emit_fence = uvd_v7_0_enc_ring_emit_fence,
1760 .emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
1761 .test_ring = uvd_v7_0_enc_ring_test_ring,
1762 .test_ib = uvd_v7_0_enc_ring_test_ib,
1763 .insert_nop = amdgpu_ring_insert_nop,
1764 .insert_end = uvd_v7_0_enc_ring_insert_end,
1765 .pad_ib = amdgpu_ring_generic_pad_ib,
1766 .begin_use = amdgpu_uvd_ring_begin_use,
1767 .end_use = amdgpu_uvd_ring_end_use,
b6cb3b5c 1768 .emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
1769};
1770
1771static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
1772{
1773 adev->uvd.ring.funcs = &uvd_v7_0_ring_vm_funcs;
1774 DRM_INFO("UVD is enabled in VM mode\n");
1775}
1776
1777static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1778{
1779 int i;
1780
1781 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
1782 adev->uvd.ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
1783
1784 DRM_INFO("UVD ENC is enabled in VM mode\n");
1785}
1786
1787static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
1788 .set = uvd_v7_0_set_interrupt_state,
1789 .process = uvd_v7_0_process_interrupt,
1790};
1791
1792static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1793{
1794 adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
1795 adev->uvd.irq.funcs = &uvd_v7_0_irq_funcs;
1796}
1797
1798const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
1799{
1800 .type = AMD_IP_BLOCK_TYPE_UVD,
1801 .major = 7,
1802 .minor = 0,
1803 .rev = 0,
1804 .funcs = &uvd_v7_0_ip_funcs,
1805};