drm/amdgpu/uvd7: add uvd doorbell initialization for sriov
[linux-2.6-block.git] drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include <drm/drmP.h>
26#include "amdgpu.h"
27#include "amdgpu_uvd.h"
28#include "soc15d.h"
29#include "soc15_common.h"
 30#include "mmsch_v1_0.h"
31
32#include "vega10/soc15ip.h"
33#include "vega10/UVD/uvd_7_0_offset.h"
34#include "vega10/UVD/uvd_7_0_sh_mask.h"
35#include "vega10/VCE/vce_4_0_offset.h"
36#include "vega10/VCE/vce_4_0_default.h"
37#include "vega10/VCE/vce_4_0_sh_mask.h"
38#include "vega10/NBIF/nbif_6_1_offset.h"
39#include "vega10/HDP/hdp_4_0_offset.h"
40#include "vega10/MMHUB/mmhub_1_0_offset.h"
41#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
42
43static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
44static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
45static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
46static int uvd_v7_0_start(struct amdgpu_device *adev);
47static void uvd_v7_0_stop(struct amdgpu_device *adev);
 48static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
49
50/**
51 * uvd_v7_0_ring_get_rptr - get read pointer
52 *
53 * @ring: amdgpu_ring pointer
54 *
55 * Returns the current hardware read pointer
56 */
57static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
58{
59 struct amdgpu_device *adev = ring->adev;
60
61 return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR));
62}
63
64/**
65 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
66 *
67 * @ring: amdgpu_ring pointer
68 *
69 * Returns the current hardware enc read pointer
70 */
71static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
72{
73 struct amdgpu_device *adev = ring->adev;
74
75 if (ring == &adev->uvd.ring_enc[0])
76 return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_RPTR));
77 else
78 return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_RPTR2));
79}
80
81/**
82 * uvd_v7_0_ring_get_wptr - get write pointer
83 *
84 * @ring: amdgpu_ring pointer
85 *
86 * Returns the current hardware write pointer
87 */
88static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
89{
90 struct amdgpu_device *adev = ring->adev;
91
92 return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR));
93}
94
95/**
96 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
97 *
98 * @ring: amdgpu_ring pointer
99 *
100 * Returns the current hardware enc write pointer
101 */
102static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
103{
104 struct amdgpu_device *adev = ring->adev;
105
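	/* rings driven through a doorbell keep their write pointer in the writeback buffer instead of the UVD_RB_WPTR registers */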
106 if (ring->use_doorbell)
107 return adev->wb.wb[ring->wptr_offs];
108
109 if (ring == &adev->uvd.ring_enc[0])
110 return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR));
111 else
112 return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR2));
113}
114
115/**
116 * uvd_v7_0_ring_set_wptr - set write pointer
117 *
118 * @ring: amdgpu_ring pointer
119 *
120 * Commits the write pointer to the hardware
121 */
122static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
123{
124 struct amdgpu_device *adev = ring->adev;
125
126 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR), lower_32_bits(ring->wptr));
127}
128
129/**
130 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
131 *
132 * @ring: amdgpu_ring pointer
133 *
134 * Commits the enc write pointer to the hardware
135 */
136static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
137{
138 struct amdgpu_device *adev = ring->adev;
139
140 if (ring->use_doorbell) {
141 /* XXX check if swapping is necessary on BE */
142 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
143 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
144 return;
145 }
146
147 if (ring == &adev->uvd.ring_enc[0])
148 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR),
149 lower_32_bits(ring->wptr));
150 else
151 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR2),
152 lower_32_bits(ring->wptr));
153}
154
155/**
156 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
157 *
158 * @ring: the engine to test on
159 *
160 */
161static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
162{
163 struct amdgpu_device *adev = ring->adev;
164 uint32_t rptr = amdgpu_ring_get_rptr(ring);
165 unsigned i;
166 int r;
167
168 r = amdgpu_ring_alloc(ring, 16);
169 if (r) {
170 DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
171 ring->idx, r);
172 return r;
173 }
174 amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
175 amdgpu_ring_commit(ring);
176
177 for (i = 0; i < adev->usec_timeout; i++) {
178 if (amdgpu_ring_get_rptr(ring) != rptr)
179 break;
180 DRM_UDELAY(1);
181 }
182
183 if (i < adev->usec_timeout) {
184 DRM_INFO("ring test on %d succeeded in %d usecs\n",
185 ring->idx, i);
186 } else {
187 DRM_ERROR("amdgpu: ring %d test failed\n",
188 ring->idx);
189 r = -ETIMEDOUT;
190 }
191
192 return r;
193}
194
195/**
196 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
197 *
198 * @adev: amdgpu_device pointer
199 * @ring: ring we should submit the msg to
200 * @handle: session handle to use
201 * @fence: optional fence to return
202 *
203 * Open up a stream for HW test
204 */
205static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
206 struct dma_fence **fence)
207{
208 const unsigned ib_size_dw = 16;
209 struct amdgpu_job *job;
210 struct amdgpu_ib *ib;
211 struct dma_fence *f = NULL;
212 uint64_t dummy;
213 int i, r;
214
215 r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
216 if (r)
217 return r;
218
219 ib = &job->ibs[0];
220 dummy = ib->gpu_addr + 1024;
221
222 ib->length_dw = 0;
223 ib->ptr[ib->length_dw++] = 0x00000018;
224 ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
225 ib->ptr[ib->length_dw++] = handle;
226 ib->ptr[ib->length_dw++] = 0x00000000;
227 ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
228 ib->ptr[ib->length_dw++] = dummy;
229
230 ib->ptr[ib->length_dw++] = 0x00000014;
231 ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
232 ib->ptr[ib->length_dw++] = 0x0000001c;
233 ib->ptr[ib->length_dw++] = 0x00000000;
234 ib->ptr[ib->length_dw++] = 0x00000000;
235
236 ib->ptr[ib->length_dw++] = 0x00000008;
237 ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
238
239 for (i = ib->length_dw; i < ib_size_dw; ++i)
240 ib->ptr[i] = 0x0;
241
242 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
243 job->fence = dma_fence_get(f);
244 if (r)
245 goto err;
246
247 amdgpu_job_free(job);
248 if (fence)
249 *fence = dma_fence_get(f);
250 dma_fence_put(f);
251 return 0;
252
253err:
254 amdgpu_job_free(job);
255 return r;
256}
257
258/**
259 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
260 *
261 * @adev: amdgpu_device pointer
262 * @ring: ring we should submit the msg to
263 * @handle: session handle to use
264 * @fence: optional fence to return
265 *
266 * Close up a stream for HW test or if userspace failed to do so
267 */
268int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
269 bool direct, struct dma_fence **fence)
270{
271 const unsigned ib_size_dw = 16;
272 struct amdgpu_job *job;
273 struct amdgpu_ib *ib;
274 struct dma_fence *f = NULL;
275 uint64_t dummy;
276 int i, r;
277
278 r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
279 if (r)
280 return r;
281
282 ib = &job->ibs[0];
283 dummy = ib->gpu_addr + 1024;
284
285 ib->length_dw = 0;
286 ib->ptr[ib->length_dw++] = 0x00000018;
287 ib->ptr[ib->length_dw++] = 0x00000001;
288 ib->ptr[ib->length_dw++] = handle;
289 ib->ptr[ib->length_dw++] = 0x00000000;
290 ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
291 ib->ptr[ib->length_dw++] = dummy;
292
293 ib->ptr[ib->length_dw++] = 0x00000014;
294 ib->ptr[ib->length_dw++] = 0x00000002;
295 ib->ptr[ib->length_dw++] = 0x0000001c;
296 ib->ptr[ib->length_dw++] = 0x00000000;
297 ib->ptr[ib->length_dw++] = 0x00000000;
298
299 ib->ptr[ib->length_dw++] = 0x00000008;
300 ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
301
302 for (i = ib->length_dw; i < ib_size_dw; ++i)
303 ib->ptr[i] = 0x0;
304
305 if (direct) {
306 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
307 job->fence = dma_fence_get(f);
308 if (r)
309 goto err;
310
311 amdgpu_job_free(job);
312 } else {
313 r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
314 AMDGPU_FENCE_OWNER_UNDEFINED, &f);
315 if (r)
316 goto err;
317 }
318
319 if (fence)
320 *fence = dma_fence_get(f);
321 dma_fence_put(f);
322 return 0;
323
324err:
325 amdgpu_job_free(job);
326 return r;
327}
328
329/**
330 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
331 *
332 * @ring: the engine to test on
333 *
334 */
335static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
336{
337 struct dma_fence *fence = NULL;
338 long r;
339
340 r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
341 if (r) {
342 DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
343 goto error;
344 }
345
346 r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence);
347 if (r) {
348 DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
349 goto error;
350 }
351
352 r = dma_fence_wait_timeout(fence, false, timeout);
353 if (r == 0) {
354 DRM_ERROR("amdgpu: IB test timed out.\n");
355 r = -ETIMEDOUT;
356 } else if (r < 0) {
357 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
358 } else {
359 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
360 r = 0;
361 }
362error:
363 dma_fence_put(fence);
364 return r;
365}
366
367static int uvd_v7_0_early_init(void *handle)
368{
369 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
370
371 adev->uvd.num_enc_rings = 2;
372 uvd_v7_0_set_ring_funcs(adev);
373 uvd_v7_0_set_enc_ring_funcs(adev);
374 uvd_v7_0_set_irq_funcs(adev);
375
376 return 0;
377}
378
379static int uvd_v7_0_sw_init(void *handle)
380{
381 struct amdgpu_ring *ring;
382 struct amd_sched_rq *rq;
383 int i, r;
384 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
385
386 /* UVD TRAP */
387 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, 124, &adev->uvd.irq);
388 if (r)
389 return r;
390
391 /* UVD ENC TRAP */
392 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
393 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq);
394 if (r)
395 return r;
396 }
397
398 r = amdgpu_uvd_sw_init(adev);
399 if (r)
400 return r;
401
402 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
403 const struct common_firmware_header *hdr;
404 hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
405 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
406 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
407 adev->firmware.fw_size +=
408 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
409 DRM_INFO("PSP loading UVD firmware\n");
410 }
411
412 ring = &adev->uvd.ring_enc[0];
413 rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
414 r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
415 rq, amdgpu_sched_jobs);
416 if (r) {
417 DRM_ERROR("Failed setting up UVD ENC run queue.\n");
418 return r;
419 }
420
421 r = amdgpu_uvd_resume(adev);
422 if (r)
423 return r;
424
425 ring = &adev->uvd.ring;
426 sprintf(ring->name, "uvd");
427 r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
428 if (r)
429 return r;
430
431 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
432 ring = &adev->uvd.ring_enc[i];
433 sprintf(ring->name, "uvd_enc%d", i);
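		/* under SR-IOV the MMSCH programs the enc ring registers, so the ring is driven through a doorbell rather than direct WPTR register writes */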
434 if (amdgpu_sriov_vf(adev)) {
435 ring->use_doorbell = true;
436 ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
437 }
438 r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
439 if (r)
440 return r;
441 }
442
443 return r;
444}
445
446static int uvd_v7_0_sw_fini(void *handle)
447{
448 int i, r;
449 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
450
451 r = amdgpu_uvd_suspend(adev);
452 if (r)
453 return r;
454
455 amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
456
457 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
458 amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
459
 460 return amdgpu_uvd_sw_fini(adev);
461}
462
463/**
464 * uvd_v7_0_hw_init - start and test UVD block
465 *
466 * @adev: amdgpu_device pointer
467 *
468 * Initialize the hardware, boot up the VCPU and do some testing
469 */
470static int uvd_v7_0_hw_init(void *handle)
471{
472 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
473 struct amdgpu_ring *ring = &adev->uvd.ring;
474 uint32_t tmp;
475 int i, r;
476
477 r = uvd_v7_0_start(adev);
478 if (r)
479 goto done;
480
481 ring->ready = true;
482 r = amdgpu_ring_test_ring(ring);
483 if (r) {
484 ring->ready = false;
485 goto done;
486 }
487
488 r = amdgpu_ring_alloc(ring, 10);
489 if (r) {
490 DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
491 goto done;
492 }
493
494 tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
495 mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
496 amdgpu_ring_write(ring, tmp);
497 amdgpu_ring_write(ring, 0xFFFFF);
498
499 tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
500 mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
501 amdgpu_ring_write(ring, tmp);
502 amdgpu_ring_write(ring, 0xFFFFF);
503
504 tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
505 mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
506 amdgpu_ring_write(ring, tmp);
507 amdgpu_ring_write(ring, 0xFFFFF);
508
509 /* Clear timeout status bits */
510 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
511 mmUVD_SEMA_TIMEOUT_STATUS), 0));
512 amdgpu_ring_write(ring, 0x8);
513
514 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
515 mmUVD_SEMA_CNTL), 0));
516 amdgpu_ring_write(ring, 3);
517
518 amdgpu_ring_commit(ring);
519
520 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
521 ring = &adev->uvd.ring_enc[i];
522 ring->ready = true;
523 r = amdgpu_ring_test_ring(ring);
524 if (r) {
525 ring->ready = false;
526 goto done;
527 }
528 }
529
530done:
531 if (!r)
532 DRM_INFO("UVD and UVD ENC initialized successfully.\n");
533
534 return r;
535}
536
537/**
538 * uvd_v7_0_hw_fini - stop the hardware block
539 *
540 * @adev: amdgpu_device pointer
541 *
542 * Stop the UVD block, mark ring as not ready any more
543 */
544static int uvd_v7_0_hw_fini(void *handle)
545{
546 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
547 struct amdgpu_ring *ring = &adev->uvd.ring;
548
549 uvd_v7_0_stop(adev);
550 ring->ready = false;
551
552 return 0;
553}
554
555static int uvd_v7_0_suspend(void *handle)
556{
557 int r;
558 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
559
560 r = uvd_v7_0_hw_fini(adev);
561 if (r)
562 return r;
563
564 /* Skip this for APU for now */
 565 if (!(adev->flags & AMD_IS_APU))
 566 r = amdgpu_uvd_suspend(adev);
567
568 return r;
569}
570
571static int uvd_v7_0_resume(void *handle)
572{
573 int r;
574 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
575
576 /* Skip this for APU for now */
577 if (!(adev->flags & AMD_IS_APU)) {
578 r = amdgpu_uvd_resume(adev);
579 if (r)
580 return r;
581 }
 582 return uvd_v7_0_hw_init(adev);
583}
584
585/**
586 * uvd_v7_0_mc_resume - memory controller programming
587 *
588 * @adev: amdgpu_device pointer
589 *
 590 * Let the UVD memory controller know its offsets
591 */
592static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
593{
594 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
595 uint32_t offset;
596
597 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
598 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
599 lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
600 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
601 upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
602 offset = 0;
603 } else {
604 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
605 lower_32_bits(adev->uvd.gpu_addr));
606 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
607 upper_32_bits(adev->uvd.gpu_addr));
608 offset = size;
609 }
610
611 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
612 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
613 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);
614
615 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
616 lower_32_bits(adev->uvd.gpu_addr + offset));
617 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
618 upper_32_bits(adev->uvd.gpu_addr + offset));
619 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
620 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
621
622 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
623 lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
624 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
625 upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
626 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
627 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
628 AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
629
630 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG),
631 adev->gfx.config.gb_addr_config);
632 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG),
633 adev->gfx.config.gb_addr_config);
634 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG),
635 adev->gfx.config.gb_addr_config);
636
637 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
638}
639
640static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
641 struct amdgpu_mm_table *table)
642{
643 uint32_t data = 0, loop;
644 uint64_t addr = table->gpu_addr;
645 struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
646 uint32_t size;
647
648 size = header->header_size + header->vce_table_size + header->uvd_table_size;
649
650 /* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
651 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO), lower_32_bits(addr));
652 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI), upper_32_bits(addr));
653
654 /* 2, update vmid of descriptor */
655 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID));
656 data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
657 data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
658 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID), data);
659
660 /* 3, notify mmsch about the size of this descriptor */
661 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE), size);
662
663 /* 4, set resp to zero */
664 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);
665
666 /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
667 WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001);
668
669 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
670 loop = 1000;
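	/* poll for up to ~10 ms (1000 * 10 us) for the expected response */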
671 while ((data & 0x10000002) != 0x10000002) {
672 udelay(10);
673 data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
674 loop--;
675 if (!loop)
676 break;
677 }
678
679 if (!loop) {
680 dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
681 return -EBUSY;
682 }
683
684 return 0;
685}
686
687static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
688{
689 struct amdgpu_ring *ring;
690 uint32_t offset, size, tmp;
691 uint32_t table_size = 0;
692 struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
693 struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
694 struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
695 //struct mmsch_v1_0_cmd_indirect_write indirect_wt = {{0}};
696 struct mmsch_v1_0_cmd_end end = { {0} };
697 uint32_t *init_table = adev->virt.mm_table.cpu_addr;
698 struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
699
700 direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
701 direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
702 direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
703 end.cmd_header.command_type = MMSCH_COMMAND__END;
704
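	/* build the register init table that uvd_v7_0_mmsch_start() hands to the MMSCH, which programs the engine on behalf of the VF */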
705 if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
706 header->version = MMSCH_VERSION;
707 header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
708
709 if (header->vce_table_offset == 0 && header->vce_table_size == 0)
710 header->uvd_table_offset = header->header_size;
711 else
712 header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;
713
714 init_table += header->uvd_table_offset;
715
716 ring = &adev->uvd.ring;
717 size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
718
 719 /* disable DPG */
720 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
721 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK, 0);
722 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
723 0xFFFFFFFF, 0x00000004);
 724 /* mc resume */
725 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
726 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
727 lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
728 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
729 upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
730 offset = 0;
731 } else {
732 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
733 lower_32_bits(adev->uvd.gpu_addr));
734 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
735 upper_32_bits(adev->uvd.gpu_addr));
736 offset = size;
737 }
738
739 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
740 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
741 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);
742
743 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
744 lower_32_bits(adev->uvd.gpu_addr + offset));
745 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
746 upper_32_bits(adev->uvd.gpu_addr + offset));
747 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
748 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
749
750 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
751 lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
752 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
753 upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
754 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
755 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
756 AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
757
758 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG),
759 adev->gfx.config.gb_addr_config);
760 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG),
761 adev->gfx.config.gb_addr_config);
762 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG),
763 adev->gfx.config.gb_addr_config);
764 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
 765 /* mc resume end */
766
767 /* disable clock gating */
768 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL),
769 ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
770
 771 /* disable interrupt */
772 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
773 ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
774
775 /* stall UMC and register bus before resetting VCPU */
776 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
777 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
778 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
779
780 /* put LMI, VCPU, RBC etc... into reset */
781 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
782 (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
783 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
784 UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
785 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
786 UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
787 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
788 UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
789 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
790
791 /* initialize UVD memory controller */
792 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL),
793 (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
794 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
795 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
796 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
797 UVD_LMI_CTRL__REQ_MODE_MASK |
798 0x00100000L));
799
800 /* disable byte swapping */
801 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), 0);
802 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MP_SWAP_CNTL), 0);
803
804 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040);
805 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0);
806 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040);
807 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0);
808 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0);
809 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88);
810
811 /* take all subblocks out of reset, except VCPU */
812 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
813 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
814
815 /* enable VCPU clock */
816 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
817 UVD_VCPU_CNTL__CLK_EN_MASK);
818
819 /* enable UMC */
820 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
821 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
822
823 /* boot up the VCPU */
824 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
825
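		/* wait for the VCPU to report it has booted (same UVD_STATUS bit checked in uvd_v7_0_start()) */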
826 MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
827
828 /* enable master interrupt */
829 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
830 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
831 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
832
 833 /* clear bit 4 of UVD_STATUS */
834 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
835 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
836
837 /* force RBC into idle state */
838 size = order_base_2(ring->ring_size);
839 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
840 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
841 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
842 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
843 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
844 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
845 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
846
847 /* set the write pointer delay */
848 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0);
849
850 /* set the wb address */
851 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR),
852 (upper_32_bits(ring->gpu_addr) >> 2));
853
 854 /* program the RB_BASE for ring buffer */
855 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
856 lower_32_bits(ring->gpu_addr));
857 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
858 upper_32_bits(ring->gpu_addr));
859
860 ring->wptr = 0;
861 ring = &adev->uvd.ring_enc[0];
862 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
863 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
864 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
865
866 ring = &adev->uvd.ring_enc[1];
867 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO2), ring->gpu_addr);
868 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI2), upper_32_bits(ring->gpu_addr));
869 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE2), ring->ring_size / 4);
870
871 /* add end packet */
872 memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
873 table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
874 header->uvd_table_size = table_size;
875
876 return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
877 }
 878 return -EINVAL; /* already initialized? */
879}
880
881/**
882 * uvd_v7_0_start - start UVD block
883 *
884 * @adev: amdgpu_device pointer
885 *
886 * Setup and start the UVD block
887 */
888static int uvd_v7_0_start(struct amdgpu_device *adev)
889{
890 struct amdgpu_ring *ring = &adev->uvd.ring;
891 uint32_t rb_bufsz, tmp;
892 uint32_t lmi_swap_cntl;
893 uint32_t mp_swap_cntl;
894 int i, j, r;
895
896 /* disable DPG */
897 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
898 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
899
900 /* disable byte swapping */
901 lmi_swap_cntl = 0;
902 mp_swap_cntl = 0;
903
904 uvd_v7_0_mc_resume(adev);
905
906 /* disable clock gating */
907 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), 0,
908 ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
909
 910 /* disable interrupt */
911 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
912 ~UVD_MASTINT_EN__VCPU_EN_MASK);
913
914 /* stall UMC and register bus before resetting VCPU */
915 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
916 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
917 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
918 mdelay(1);
919
920 /* put LMI, VCPU, RBC etc... into reset */
921 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
922 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
923 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
924 UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
925 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
926 UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
927 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
928 UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
929 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
930 mdelay(5);
931
932 /* initialize UVD memory controller */
933 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL),
934 (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
935 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
936 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
937 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
938 UVD_LMI_CTRL__REQ_MODE_MASK |
939 0x00100000L);
940
941#ifdef __BIG_ENDIAN
942 /* swap (8 in 32) RB and IB */
943 lmi_swap_cntl = 0xa;
944 mp_swap_cntl = 0;
945#endif
946 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), lmi_swap_cntl);
947 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MP_SWAP_CNTL), mp_swap_cntl);
948
949 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040);
950 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0);
951 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040);
952 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0);
953 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0);
954 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88);
955
956 /* take all subblocks out of reset, except VCPU */
957 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
958 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
959 mdelay(5);
960
961 /* enable VCPU clock */
962 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
963 UVD_VCPU_CNTL__CLK_EN_MASK);
964
965 /* enable UMC */
966 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
967 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
968
969 /* boot up the VCPU */
970 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
971 mdelay(10);
972
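	/* make up to 10 attempts, each waiting up to ~1 s (100 * 10 ms) for the VCPU status bit, with a VCPU soft reset between attempts */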
973 for (i = 0; i < 10; ++i) {
974 uint32_t status;
975
976 for (j = 0; j < 100; ++j) {
977 status = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS));
978 if (status & 2)
979 break;
980 mdelay(10);
981 }
982 r = 0;
983 if (status & 2)
984 break;
985
986 DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
987 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
988 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
989 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
990 mdelay(10);
991 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
992 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
993 mdelay(10);
994 r = -1;
995 }
996
997 if (r) {
998 DRM_ERROR("UVD not responding, giving up!!!\n");
999 return r;
1000 }
1001 /* enable master interrupt */
1002 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
1003 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
1004 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
1005
 1006 /* clear bit 4 of UVD_STATUS */
1007 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
1008 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1009
1010 /* force RBC into idle state */
1011 rb_bufsz = order_base_2(ring->ring_size);
1012 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1013 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1014 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1015 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
1016 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1017 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1018 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
1019
1020 /* set the write pointer delay */
1021 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0);
1022
1023 /* set the wb address */
1024 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR),
1025 (upper_32_bits(ring->gpu_addr) >> 2));
1026
 1027 /* program the RB_BASE for ring buffer */
1028 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
1029 lower_32_bits(ring->gpu_addr));
1030 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
1031 upper_32_bits(ring->gpu_addr));
1032
1033 /* Initialize the ring buffer's read and write pointers */
1034 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR), 0);
1035
1036 ring->wptr = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR));
1037 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR),
1038 lower_32_bits(ring->wptr));
1039
1040 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
1041 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1042
1043 ring = &adev->uvd.ring_enc[0];
1044 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_RPTR), lower_32_bits(ring->wptr));
1045 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR), lower_32_bits(ring->wptr));
1046 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
1047 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
1048 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
1049
1050 ring = &adev->uvd.ring_enc[1];
1051 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_RPTR2), lower_32_bits(ring->wptr));
1052 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR2), lower_32_bits(ring->wptr));
1053 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO2), ring->gpu_addr);
1054 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI2), upper_32_bits(ring->gpu_addr));
1055 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE2), ring->ring_size / 4);
1056
1057 return 0;
1058}
1059
1060/**
1061 * uvd_v7_0_stop - stop UVD block
1062 *
1063 * @adev: amdgpu_device pointer
1064 *
1065 * stop the UVD block
1066 */
1067static void uvd_v7_0_stop(struct amdgpu_device *adev)
1068{
1069 /* force RBC into idle state */
1070 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0x11010101);
1071
1072 /* Stall UMC and register bus before resetting VCPU */
1073 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
1074 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
1075 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1076 mdelay(1);
1077
1078 /* put VCPU into reset */
1079 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1080 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1081 mdelay(5);
1082
1083 /* disable VCPU clock */
1084 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0x0);
1085
1086 /* Unstall UMC and register bus */
1087 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
1088 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1089}
1090
1091/**
 1092 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
1093 *
1094 * @ring: amdgpu_ring pointer
1095 * @fence: fence to emit
1096 *
1097 * Write a fence and a trap command to the ring.
1098 */
1099static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1100 unsigned flags)
1101{
1102 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1103
1104 amdgpu_ring_write(ring,
1105 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
1106 amdgpu_ring_write(ring, seq);
1107 amdgpu_ring_write(ring,
1108 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1109 amdgpu_ring_write(ring, addr & 0xffffffff);
1110 amdgpu_ring_write(ring,
1111 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1112 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1113 amdgpu_ring_write(ring,
1114 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1115 amdgpu_ring_write(ring, 0);
1116
1117 amdgpu_ring_write(ring,
1118 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1119 amdgpu_ring_write(ring, 0);
1120 amdgpu_ring_write(ring,
1121 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1122 amdgpu_ring_write(ring, 0);
1123 amdgpu_ring_write(ring,
1124 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1125 amdgpu_ring_write(ring, 2);
1126}
1127
1128/**
1129 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
1130 *
1131 * @ring: amdgpu_ring pointer
1132 * @fence: fence to emit
1133 *
 1134 * Write an enc fence and a trap command to the ring.
1135 */
1136static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1137 u64 seq, unsigned flags)
1138{
1139 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1140
1141 amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
1142 amdgpu_ring_write(ring, addr);
1143 amdgpu_ring_write(ring, upper_32_bits(addr));
1144 amdgpu_ring_write(ring, seq);
1145 amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
1146}
1147
1148/**
1149 * uvd_v7_0_ring_emit_hdp_flush - emit an hdp flush
1150 *
1151 * @ring: amdgpu_ring pointer
1152 *
1153 * Emits an hdp flush.
1154 */
1155static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1156{
1157 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(NBIF, 0,
1158 mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0));
1159 amdgpu_ring_write(ring, 0);
1160}
1161
1162/**
 1163 * uvd_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate
1164 *
1165 * @ring: amdgpu_ring pointer
1166 *
1167 * Emits an hdp invalidate.
1168 */
1169static void uvd_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
1170{
1171 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 0));
1172 amdgpu_ring_write(ring, 1);
1173}
1174
1175/**
1176 * uvd_v7_0_ring_test_ring - register write test
1177 *
1178 * @ring: amdgpu_ring pointer
1179 *
1180 * Test if we can successfully write to the context register
1181 */
1182static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
1183{
1184 struct amdgpu_device *adev = ring->adev;
1185 uint32_t tmp = 0;
1186 unsigned i;
1187 int r;
1188
1189 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
1190 r = amdgpu_ring_alloc(ring, 3);
1191 if (r) {
1192 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
1193 ring->idx, r);
1194 return r;
1195 }
1196 amdgpu_ring_write(ring,
1197 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
1198 amdgpu_ring_write(ring, 0xDEADBEEF);
1199 amdgpu_ring_commit(ring);
1200 for (i = 0; i < adev->usec_timeout; i++) {
1201 tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
1202 if (tmp == 0xDEADBEEF)
1203 break;
1204 DRM_UDELAY(1);
1205 }
1206
1207 if (i < adev->usec_timeout) {
1208 DRM_INFO("ring test on %d succeeded in %d usecs\n",
1209 ring->idx, i);
1210 } else {
1211 DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
1212 ring->idx, tmp);
1213 r = -EINVAL;
1214 }
1215 return r;
1216}
1217
1218/**
1219 * uvd_v7_0_ring_emit_ib - execute indirect buffer
1220 *
1221 * @ring: amdgpu_ring pointer
1222 * @ib: indirect buffer to execute
1223 *
1224 * Write ring commands to execute the indirect buffer
1225 */
1226static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
1227 struct amdgpu_ib *ib,
1228 unsigned vm_id, bool ctx_switch)
1229{
1230 amdgpu_ring_write(ring,
1231 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
1232 amdgpu_ring_write(ring, vm_id);
1233
1234 amdgpu_ring_write(ring,
1235 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1236 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1237 amdgpu_ring_write(ring,
1238 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1239 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1240 amdgpu_ring_write(ring,
1241 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
1242 amdgpu_ring_write(ring, ib->length_dw);
1243}
1244
1245/**
1246 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
1247 *
1248 * @ring: amdgpu_ring pointer
1249 * @ib: indirect buffer to execute
1250 *
1251 * Write enc ring commands to execute the indirect buffer
1252 */
1253static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1254 struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
1255{
1256 amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
1257 amdgpu_ring_write(ring, vm_id);
1258 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1259 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1260 amdgpu_ring_write(ring, ib->length_dw);
1261}
1262
1263static void uvd_v7_0_vm_reg_write(struct amdgpu_ring *ring,
1264 uint32_t data0, uint32_t data1)
1265{
1266 amdgpu_ring_write(ring,
1267 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1268 amdgpu_ring_write(ring, data0);
1269 amdgpu_ring_write(ring,
1270 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1271 amdgpu_ring_write(ring, data1);
1272 amdgpu_ring_write(ring,
1273 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1274 amdgpu_ring_write(ring, 8);
1275}
1276
1277static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
1278 uint32_t data0, uint32_t data1, uint32_t mask)
1279{
1280 amdgpu_ring_write(ring,
1281 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1282 amdgpu_ring_write(ring, data0);
1283 amdgpu_ring_write(ring,
1284 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1285 amdgpu_ring_write(ring, data1);
1286 amdgpu_ring_write(ring,
1287 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
1288 amdgpu_ring_write(ring, mask);
1289 amdgpu_ring_write(ring,
1290 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1291 amdgpu_ring_write(ring, 12);
1292}
1293
1294static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1295 unsigned vm_id, uint64_t pd_addr)
1296{
 1297 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
 1298 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
 1299 uint32_t data0, data1, mask;
 1300 unsigned eng = ring->vm_inv_eng;
1301
1302 pd_addr = pd_addr | 0x1; /* valid bit */
 1303 /* for now only the physical base address of the PDE and the valid bit are used */
1304 BUG_ON(pd_addr & 0xFFFF00000000003EULL);
1305
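	/* update the page table base through ring packets so it takes effect when the ring reaches this point, then trigger and wait for the TLB flush */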
1306 data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
1307 data1 = upper_32_bits(pd_addr);
1308 uvd_v7_0_vm_reg_write(ring, data0, data1);
1309
1310 data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
1311 data1 = lower_32_bits(pd_addr);
1312 uvd_v7_0_vm_reg_write(ring, data0, data1);
1313
1314 data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
1315 data1 = lower_32_bits(pd_addr);
1316 mask = 0xffffffff;
1317 uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
1318
1319 /* flush TLB */
1320 data0 = (hub->vm_inv_eng0_req + eng) << 2;
1321 data1 = req;
1322 uvd_v7_0_vm_reg_write(ring, data0, data1);
1323
1324 /* wait for flush */
1325 data0 = (hub->vm_inv_eng0_ack + eng) << 2;
1326 data1 = 1 << vm_id;
1327 mask = 1 << vm_id;
1328 uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
1329}
1330
1331static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1332{
1333 amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1334}
1335
1336static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1337 unsigned int vm_id, uint64_t pd_addr)
1338{
 1339 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
 1340 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
 1341 unsigned eng = ring->vm_inv_eng;
1342
1343 pd_addr = pd_addr | 0x1; /* valid bit */
 1344 /* for now only the physical base address of the PDE and the valid bit are used */
1345 BUG_ON(pd_addr & 0xFFFF00000000003EULL);
1346
1347 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1348 amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
1349 amdgpu_ring_write(ring, upper_32_bits(pd_addr));
1350
1351 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1352 amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
1353 amdgpu_ring_write(ring, lower_32_bits(pd_addr));
1354
1355 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1356 amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
1357 amdgpu_ring_write(ring, 0xffffffff);
1358 amdgpu_ring_write(ring, lower_32_bits(pd_addr));
1359
1360 /* flush TLB */
1361 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1362 amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
1363 amdgpu_ring_write(ring, req);
1364
1365 /* wait for flush */
1366 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1367 amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
1368 amdgpu_ring_write(ring, 1 << vm_id);
1369 amdgpu_ring_write(ring, 1 << vm_id);
1370}
1371
1372#if 0
1373static bool uvd_v7_0_is_idle(void *handle)
1374{
1375 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1376
1377 return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
1378}
1379
1380static int uvd_v7_0_wait_for_idle(void *handle)
1381{
1382 unsigned i;
1383 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1384
1385 for (i = 0; i < adev->usec_timeout; i++) {
1386 if (uvd_v7_0_is_idle(handle))
1387 return 0;
1388 }
1389 return -ETIMEDOUT;
1390}
1391
1392#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
1393static bool uvd_v7_0_check_soft_reset(void *handle)
1394{
1395 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1396 u32 srbm_soft_reset = 0;
1397 u32 tmp = RREG32(mmSRBM_STATUS);
1398
1399 if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
1400 REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
1401 (RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS) &
1402 AMDGPU_UVD_STATUS_BUSY_MASK)))
1403 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1404 SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
1405
1406 if (srbm_soft_reset) {
1407 adev->uvd.srbm_soft_reset = srbm_soft_reset;
1408 return true;
1409 } else {
1410 adev->uvd.srbm_soft_reset = 0;
1411 return false;
1412 }
1413}
1414
1415static int uvd_v7_0_pre_soft_reset(void *handle)
1416{
1417 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1418
1419 if (!adev->uvd.srbm_soft_reset)
1420 return 0;
1421
1422 uvd_v7_0_stop(adev);
1423 return 0;
1424}
1425
1426static int uvd_v7_0_soft_reset(void *handle)
1427{
1428 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1429 u32 srbm_soft_reset;
1430
1431 if (!adev->uvd.srbm_soft_reset)
1432 return 0;
1433 srbm_soft_reset = adev->uvd.srbm_soft_reset;
1434
1435 if (srbm_soft_reset) {
1436 u32 tmp;
1437
1438 tmp = RREG32(mmSRBM_SOFT_RESET);
1439 tmp |= srbm_soft_reset;
1440 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1441 WREG32(mmSRBM_SOFT_RESET, tmp);
1442 tmp = RREG32(mmSRBM_SOFT_RESET);
1443
1444 udelay(50);
1445
1446 tmp &= ~srbm_soft_reset;
1447 WREG32(mmSRBM_SOFT_RESET, tmp);
1448 tmp = RREG32(mmSRBM_SOFT_RESET);
1449
1450 /* Wait a little for things to settle down */
1451 udelay(50);
1452 }
1453
1454 return 0;
1455}
1456
1457static int uvd_v7_0_post_soft_reset(void *handle)
1458{
1459 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1460
1461 if (!adev->uvd.srbm_soft_reset)
1462 return 0;
1463
1464 mdelay(5);
1465
1466 return uvd_v7_0_start(adev);
1467}
1468#endif
1469
1470static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
1471 struct amdgpu_irq_src *source,
1472 unsigned type,
1473 enum amdgpu_interrupt_state state)
1474{
1475 // TODO
1476 return 0;
1477}
1478
1479static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
1480 struct amdgpu_irq_src *source,
1481 struct amdgpu_iv_entry *entry)
1482{
1483 DRM_DEBUG("IH: UVD TRAP\n");
1484 switch (entry->src_id) {
1485 case 124:
1486 amdgpu_fence_process(&adev->uvd.ring);
1487 break;
1488 case 119:
1489 amdgpu_fence_process(&adev->uvd.ring_enc[0]);
1490 break;
1491 case 120:
1492 amdgpu_fence_process(&adev->uvd.ring_enc[1]);
1493 break;
1494 default:
1495 DRM_ERROR("Unhandled interrupt: %d %d\n",
1496 entry->src_id, entry->src_data[0]);
1497 break;
1498 }
1499
1500 return 0;
1501}
1502
1503#if 0
1504static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
1505{
1506 uint32_t data, data1, data2, suvd_flags;
1507
1508 data = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL));
1509 data1 = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_GATE));
1510 data2 = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_CTRL));
1511
1512 data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1513 UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1514
1515 suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1516 UVD_SUVD_CGC_GATE__SIT_MASK |
1517 UVD_SUVD_CGC_GATE__SMP_MASK |
1518 UVD_SUVD_CGC_GATE__SCM_MASK |
1519 UVD_SUVD_CGC_GATE__SDB_MASK;
1520
1521 data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1522 (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1523 (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1524
1525 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1526 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1527 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1528 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1529 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1530 UVD_CGC_CTRL__SYS_MODE_MASK |
1531 UVD_CGC_CTRL__UDEC_MODE_MASK |
1532 UVD_CGC_CTRL__MPEG2_MODE_MASK |
1533 UVD_CGC_CTRL__REGS_MODE_MASK |
1534 UVD_CGC_CTRL__RBC_MODE_MASK |
1535 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1536 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1537 UVD_CGC_CTRL__IDCT_MODE_MASK |
1538 UVD_CGC_CTRL__MPRD_MODE_MASK |
1539 UVD_CGC_CTRL__MPC_MODE_MASK |
1540 UVD_CGC_CTRL__LBSI_MODE_MASK |
1541 UVD_CGC_CTRL__LRBBM_MODE_MASK |
1542 UVD_CGC_CTRL__WCB_MODE_MASK |
1543 UVD_CGC_CTRL__VCPU_MODE_MASK |
1544 UVD_CGC_CTRL__JPEG_MODE_MASK |
1545 UVD_CGC_CTRL__JPEG2_MODE_MASK |
1546 UVD_CGC_CTRL__SCPU_MODE_MASK);
1547 data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1548 UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1549 UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1550 UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1551 UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1552 data1 |= suvd_flags;
1553
1554 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), data);
1555 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_GATE), 0);
1556 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_GATE), data1);
1557 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_CTRL), data2);
1558}
1559
1560static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
1561{
1562 uint32_t data, data1, cgc_flags, suvd_flags;
1563
1564 data = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_GATE));
1565 data1 = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_GATE));
1566
1567 cgc_flags = UVD_CGC_GATE__SYS_MASK |
1568 UVD_CGC_GATE__UDEC_MASK |
1569 UVD_CGC_GATE__MPEG2_MASK |
1570 UVD_CGC_GATE__RBC_MASK |
1571 UVD_CGC_GATE__LMI_MC_MASK |
1572 UVD_CGC_GATE__IDCT_MASK |
1573 UVD_CGC_GATE__MPRD_MASK |
1574 UVD_CGC_GATE__MPC_MASK |
1575 UVD_CGC_GATE__LBSI_MASK |
1576 UVD_CGC_GATE__LRBBM_MASK |
1577 UVD_CGC_GATE__UDEC_RE_MASK |
1578 UVD_CGC_GATE__UDEC_CM_MASK |
1579 UVD_CGC_GATE__UDEC_IT_MASK |
1580 UVD_CGC_GATE__UDEC_DB_MASK |
1581 UVD_CGC_GATE__UDEC_MP_MASK |
1582 UVD_CGC_GATE__WCB_MASK |
1583 UVD_CGC_GATE__VCPU_MASK |
1584 UVD_CGC_GATE__SCPU_MASK |
1585 UVD_CGC_GATE__JPEG_MASK |
1586 UVD_CGC_GATE__JPEG2_MASK;
1587
1588 suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1589 UVD_SUVD_CGC_GATE__SIT_MASK |
1590 UVD_SUVD_CGC_GATE__SMP_MASK |
1591 UVD_SUVD_CGC_GATE__SCM_MASK |
1592 UVD_SUVD_CGC_GATE__SDB_MASK;
1593
1594 data |= cgc_flags;
1595 data1 |= suvd_flags;
1596
1597 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_GATE), data);
1598 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_GATE), data1);
1599}
1600
1601static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
1602{
1603 u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
1604
1605 if (enable)
1606 tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1607 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1608 else
1609 tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1610 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1611
1612 WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
1613}
1614
1615
1616static int uvd_v7_0_set_clockgating_state(void *handle,
1617 enum amd_clockgating_state state)
1618{
1619 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1620 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
1621
1622 uvd_v7_0_set_bypass_mode(adev, enable);
1623
1624 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
1625 return 0;
1626
1627 if (enable) {
1628 /* disable HW gating and enable Sw gating */
1629 uvd_v7_0_set_sw_clock_gating(adev);
1630 } else {
1631 /* wait for STATUS to clear */
1632 if (uvd_v7_0_wait_for_idle(handle))
1633 return -EBUSY;
1634
1635 /* enable HW gates because UVD is idle */
1636 /* uvd_v7_0_set_hw_clock_gating(adev); */
1637 }
1638
1639 return 0;
1640}
1641
1642static int uvd_v7_0_set_powergating_state(void *handle,
1643 enum amd_powergating_state state)
1644{
1645 /* This doesn't actually powergate the UVD block.
1646 * That's done in the dpm code via the SMC. This
1647 * just re-inits the block as necessary. The actual
1648 * gating still happens in the dpm code. We should
1649 * revisit this when there is a cleaner line between
1650 * the smc and the hw blocks
1651 */
1652 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1653
1654 if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1655 return 0;
1656
1657 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), UVD_POWER_STATUS__UVD_PG_EN_MASK);
1658
1659 if (state == AMD_PG_STATE_GATE) {
1660 uvd_v7_0_stop(adev);
1661 return 0;
1662 } else {
1663 return uvd_v7_0_start(adev);
1664 }
1665}
1666#endif
1667
1668static int uvd_v7_0_set_clockgating_state(void *handle,
1669 enum amd_clockgating_state state)
1670{
 1671 /* needed for driver unload */
1672 return 0;
1673}
1674
1675const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
1676 .name = "uvd_v7_0",
1677 .early_init = uvd_v7_0_early_init,
1678 .late_init = NULL,
1679 .sw_init = uvd_v7_0_sw_init,
1680 .sw_fini = uvd_v7_0_sw_fini,
1681 .hw_init = uvd_v7_0_hw_init,
1682 .hw_fini = uvd_v7_0_hw_fini,
1683 .suspend = uvd_v7_0_suspend,
1684 .resume = uvd_v7_0_resume,
1685 .is_idle = NULL /* uvd_v7_0_is_idle */,
1686 .wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
1687 .check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
1688 .pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
1689 .soft_reset = NULL /* uvd_v7_0_soft_reset */,
1690 .post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
1691 .set_clockgating_state = uvd_v7_0_set_clockgating_state,
1692 .set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
1693};
1694
1695static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1696 .type = AMDGPU_RING_TYPE_UVD,
1697 .align_mask = 0xf,
1698 .nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
1699 .support_64bit_ptrs = false,
 1700 .vmhub = AMDGPU_MMHUB,
1701 .get_rptr = uvd_v7_0_ring_get_rptr,
1702 .get_wptr = uvd_v7_0_ring_get_wptr,
1703 .set_wptr = uvd_v7_0_ring_set_wptr,
1704 .emit_frame_size =
1705 2 + /* uvd_v7_0_ring_emit_hdp_flush */
1706 2 + /* uvd_v7_0_ring_emit_hdp_invalidate */
 1707 34 + /* uvd_v7_0_ring_emit_vm_flush */
1708 14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
1709 .emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
1710 .emit_ib = uvd_v7_0_ring_emit_ib,
1711 .emit_fence = uvd_v7_0_ring_emit_fence,
1712 .emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
1713 .emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
1714 .emit_hdp_invalidate = uvd_v7_0_ring_emit_hdp_invalidate,
1715 .test_ring = uvd_v7_0_ring_test_ring,
1716 .test_ib = amdgpu_uvd_ring_test_ib,
1717 .insert_nop = amdgpu_ring_insert_nop,
1718 .pad_ib = amdgpu_ring_generic_pad_ib,
1719 .begin_use = amdgpu_uvd_ring_begin_use,
1720 .end_use = amdgpu_uvd_ring_end_use,
1721};
1722
1723static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1724 .type = AMDGPU_RING_TYPE_UVD_ENC,
1725 .align_mask = 0x3f,
1726 .nop = HEVC_ENC_CMD_NO_OP,
1727 .support_64bit_ptrs = false,
 1728 .vmhub = AMDGPU_MMHUB,
1729 .get_rptr = uvd_v7_0_enc_ring_get_rptr,
1730 .get_wptr = uvd_v7_0_enc_ring_get_wptr,
1731 .set_wptr = uvd_v7_0_enc_ring_set_wptr,
1732 .emit_frame_size =
 1733 17 + /* uvd_v7_0_enc_ring_emit_vm_flush */
1734 5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1735 1, /* uvd_v7_0_enc_ring_insert_end */
1736 .emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
1737 .emit_ib = uvd_v7_0_enc_ring_emit_ib,
1738 .emit_fence = uvd_v7_0_enc_ring_emit_fence,
1739 .emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
1740 .test_ring = uvd_v7_0_enc_ring_test_ring,
1741 .test_ib = uvd_v7_0_enc_ring_test_ib,
1742 .insert_nop = amdgpu_ring_insert_nop,
1743 .insert_end = uvd_v7_0_enc_ring_insert_end,
1744 .pad_ib = amdgpu_ring_generic_pad_ib,
1745 .begin_use = amdgpu_uvd_ring_begin_use,
1746 .end_use = amdgpu_uvd_ring_end_use,
1747};
1748
1749static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
1750{
1751 adev->uvd.ring.funcs = &uvd_v7_0_ring_vm_funcs;
1752 DRM_INFO("UVD is enabled in VM mode\n");
1753}
1754
1755static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1756{
1757 int i;
1758
1759 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
1760 adev->uvd.ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
1761
1762 DRM_INFO("UVD ENC is enabled in VM mode\n");
1763}
1764
1765static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
1766 .set = uvd_v7_0_set_interrupt_state,
1767 .process = uvd_v7_0_process_interrupt,
1768};
1769
1770static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1771{
1772 adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
1773 adev->uvd.irq.funcs = &uvd_v7_0_irq_funcs;
1774}
1775
1776const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
1777{
1778 .type = AMD_IP_BLOCK_TYPE_UVD,
1779 .major = 7,
1780 .minor = 0,
1781 .rev = 0,
1782 .funcs = &uvd_v7_0_ip_funcs,
1783};