drm/amdgpu: split VMID management by VMHUB
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include <drm/drmP.h>
26#include "amdgpu.h"
27#include "amdgpu_uvd.h"
28#include "soc15d.h"
29#include "soc15_common.h"
30
31#include "vega10/soc15ip.h"
32#include "vega10/UVD/uvd_7_0_offset.h"
33#include "vega10/UVD/uvd_7_0_sh_mask.h"
34#include "vega10/NBIF/nbif_6_1_offset.h"
35#include "vega10/HDP/hdp_4_0_offset.h"
36#include "vega10/MMHUB/mmhub_1_0_offset.h"
37#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
38
39static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
40static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
41static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
42static int uvd_v7_0_start(struct amdgpu_device *adev);
43static void uvd_v7_0_stop(struct amdgpu_device *adev);
44
45/**
46 * uvd_v7_0_ring_get_rptr - get read pointer
47 *
48 * @ring: amdgpu_ring pointer
49 *
50 * Returns the current hardware read pointer
51 */
52static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
53{
54 struct amdgpu_device *adev = ring->adev;
55
56 return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR));
57}
58
59/**
60 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
61 *
62 * @ring: amdgpu_ring pointer
63 *
64 * Returns the current hardware enc read pointer
65 */
66static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
67{
68 struct amdgpu_device *adev = ring->adev;
69
70 if (ring == &adev->uvd.ring_enc[0])
71 return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_RPTR));
72 else
73 return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_RPTR2));
74}
75
76/**
77 * uvd_v7_0_ring_get_wptr - get write pointer
78 *
79 * @ring: amdgpu_ring pointer
80 *
81 * Returns the current hardware write pointer
82 */
83static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
84{
85 struct amdgpu_device *adev = ring->adev;
86
87 return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR));
88}
89
90/**
91 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
92 *
93 * @ring: amdgpu_ring pointer
94 *
95 * Returns the current hardware enc write pointer
96 */
97static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
98{
99 struct amdgpu_device *adev = ring->adev;
100
101 if (ring == &adev->uvd.ring_enc[0])
102 return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR));
103 else
104 return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR2));
105}
106
107/**
108 * uvd_v7_0_ring_set_wptr - set write pointer
109 *
110 * @ring: amdgpu_ring pointer
111 *
112 * Commits the write pointer to the hardware
113 */
114static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
115{
116 struct amdgpu_device *adev = ring->adev;
117
118 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR), lower_32_bits(ring->wptr));
119}
120
121/**
122 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
123 *
124 * @ring: amdgpu_ring pointer
125 *
126 * Commits the enc write pointer to the hardware
127 */
128static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
129{
130 struct amdgpu_device *adev = ring->adev;
131
132 if (ring == &adev->uvd.ring_enc[0])
133 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR),
134 lower_32_bits(ring->wptr));
135 else
136 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR2),
137 lower_32_bits(ring->wptr));
138}
139
140/**
141 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
142 *
143 * @ring: the engine to test on
144 *
145 */
146static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
147{
148 struct amdgpu_device *adev = ring->adev;
149 uint32_t rptr = amdgpu_ring_get_rptr(ring);
150 unsigned i;
151 int r;
152
153 r = amdgpu_ring_alloc(ring, 16);
154 if (r) {
155 DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
156 ring->idx, r);
157 return r;
158 }
159 amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
160 amdgpu_ring_commit(ring);
161
162 for (i = 0; i < adev->usec_timeout; i++) {
163 if (amdgpu_ring_get_rptr(ring) != rptr)
164 break;
165 DRM_UDELAY(1);
166 }
167
168 if (i < adev->usec_timeout) {
169 DRM_INFO("ring test on %d succeeded in %d usecs\n",
170 ring->idx, i);
171 } else {
172 DRM_ERROR("amdgpu: ring %d test failed\n",
173 ring->idx);
174 r = -ETIMEDOUT;
175 }
176
177 return r;
178}
179
180/**
181 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
182 *
184 * @ring: ring we should submit the msg to
185 * @handle: session handle to use
186 * @fence: optional fence to return
187 *
188 * Open up a stream for HW test
189 */
190static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
191 struct dma_fence **fence)
192{
193 const unsigned ib_size_dw = 16;
194 struct amdgpu_job *job;
195 struct amdgpu_ib *ib;
196 struct dma_fence *f = NULL;
197 uint64_t dummy;
198 int i, r;
199
200 r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
201 if (r)
202 return r;
203
204 ib = &job->ibs[0];
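	/*
	 * An address 1KiB past the start of the IB serves as a dummy buffer
	 * address inside the test message; presumably the firmware treats it
	 * as a feedback/output buffer, nothing is read back from it here.
	 */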
205 dummy = ib->gpu_addr + 1024;
206
207 ib->length_dw = 0;
208 ib->ptr[ib->length_dw++] = 0x00000018;
209 ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
210 ib->ptr[ib->length_dw++] = handle;
211 ib->ptr[ib->length_dw++] = 0x00000000;
212 ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
213 ib->ptr[ib->length_dw++] = dummy;
214
215 ib->ptr[ib->length_dw++] = 0x00000014;
216 ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
217 ib->ptr[ib->length_dw++] = 0x0000001c;
218 ib->ptr[ib->length_dw++] = 0x00000000;
219 ib->ptr[ib->length_dw++] = 0x00000000;
220
221 ib->ptr[ib->length_dw++] = 0x00000008;
222 ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
223
224 for (i = ib->length_dw; i < ib_size_dw; ++i)
225 ib->ptr[i] = 0x0;
226
227 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
228 job->fence = dma_fence_get(f);
229 if (r)
230 goto err;
231
232 amdgpu_job_free(job);
233 if (fence)
234 *fence = dma_fence_get(f);
235 dma_fence_put(f);
236 return 0;
237
238err:
239 amdgpu_job_free(job);
240 return r;
241}
242
243/**
244 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
245 *
246 * @direct: whether to submit the IB directly to the ring
247 * @ring: ring we should submit the msg to
248 * @handle: session handle to use
249 * @fence: optional fence to return
250 *
251 * Close up a stream for HW test or if userspace failed to do so
252 */
253int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
254 bool direct, struct dma_fence **fence)
255{
256 const unsigned ib_size_dw = 16;
257 struct amdgpu_job *job;
258 struct amdgpu_ib *ib;
259 struct dma_fence *f = NULL;
260 uint64_t dummy;
261 int i, r;
262
263 r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
264 if (r)
265 return r;
266
267 ib = &job->ibs[0];
268 dummy = ib->gpu_addr + 1024;
269
270 ib->length_dw = 0;
271 ib->ptr[ib->length_dw++] = 0x00000018;
272 ib->ptr[ib->length_dw++] = 0x00000001;
273 ib->ptr[ib->length_dw++] = handle;
274 ib->ptr[ib->length_dw++] = 0x00000000;
275 ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
276 ib->ptr[ib->length_dw++] = dummy;
277
278 ib->ptr[ib->length_dw++] = 0x00000014;
279 ib->ptr[ib->length_dw++] = 0x00000002;
280 ib->ptr[ib->length_dw++] = 0x0000001c;
281 ib->ptr[ib->length_dw++] = 0x00000000;
282 ib->ptr[ib->length_dw++] = 0x00000000;
283
284 ib->ptr[ib->length_dw++] = 0x00000008;
285 ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
286
287 for (i = ib->length_dw; i < ib_size_dw; ++i)
288 ib->ptr[i] = 0x0;
289
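	/*
	 * Direct submission bypasses the scheduler and is used for HW/IB
	 * tests; otherwise the close-session message goes through the
	 * scheduler entity so it is serialized with normal submissions.
	 */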
290 if (direct) {
291 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
292 job->fence = dma_fence_get(f);
293 if (r)
294 goto err;
295
296 amdgpu_job_free(job);
297 } else {
298 r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
299 AMDGPU_FENCE_OWNER_UNDEFINED, &f);
300 if (r)
301 goto err;
302 }
303
304 if (fence)
305 *fence = dma_fence_get(f);
306 dma_fence_put(f);
307 return 0;
308
309err:
310 amdgpu_job_free(job);
311 return r;
312}
313
314/**
315 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
316 *
317 * @ring: the engine to test on
 318 * @timeout: how long to wait for the IB test fence, in jiffies
319 */
320static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
321{
322 struct dma_fence *fence = NULL;
323 long r;
324
325 r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
326 if (r) {
327 DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
328 goto error;
329 }
330
331 r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence);
332 if (r) {
333 DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
334 goto error;
335 }
336
337 r = dma_fence_wait_timeout(fence, false, timeout);
338 if (r == 0) {
339 DRM_ERROR("amdgpu: IB test timed out.\n");
340 r = -ETIMEDOUT;
341 } else if (r < 0) {
342 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
343 } else {
344 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
345 r = 0;
346 }
347error:
348 dma_fence_put(fence);
349 return r;
350}
351
352static int uvd_v7_0_early_init(void *handle)
353{
354 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
355
356 adev->uvd.num_enc_rings = 2;
357 uvd_v7_0_set_ring_funcs(adev);
358 uvd_v7_0_set_enc_ring_funcs(adev);
359 uvd_v7_0_set_irq_funcs(adev);
360
361 return 0;
362}
363
364static int uvd_v7_0_sw_init(void *handle)
365{
366 struct amdgpu_ring *ring;
367 struct amd_sched_rq *rq;
368 int i, r;
369 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
370
371 /* UVD TRAP */
372 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, 124, &adev->uvd.irq);
373 if (r)
374 return r;
375
376 /* UVD ENC TRAP */
377 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
378 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq);
379 if (r)
380 return r;
381 }
382
383 r = amdgpu_uvd_sw_init(adev);
384 if (r)
385 return r;
386
387 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
388 const struct common_firmware_header *hdr;
389 hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
390 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
391 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
392 adev->firmware.fw_size +=
393 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
394 DRM_INFO("PSP loading UVD firmware\n");
395 }
396
397 ring = &adev->uvd.ring_enc[0];
398 rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
399 r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
400 rq, amdgpu_sched_jobs);
401 if (r) {
402 DRM_ERROR("Failed setting up UVD ENC run queue.\n");
403 return r;
404 }
405
406 r = amdgpu_uvd_resume(adev);
407 if (r)
408 return r;
409
410 ring = &adev->uvd.ring;
411 sprintf(ring->name, "uvd");
412 r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
413 if (r)
414 return r;
415
416 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
417 ring = &adev->uvd.ring_enc[i];
418 sprintf(ring->name, "uvd_enc%d", i);
419 r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
420 if (r)
421 return r;
422 }
423
424 return r;
425}
426
427static int uvd_v7_0_sw_fini(void *handle)
428{
429 int i, r;
430 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
431
432 r = amdgpu_uvd_suspend(adev);
433 if (r)
434 return r;
435
436 amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
437
438 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
439 amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
440
441 return amdgpu_uvd_sw_fini(adev);
442}
443
444/**
445 * uvd_v7_0_hw_init - start and test UVD block
446 *
447 * @adev: amdgpu_device pointer
448 *
449 * Initialize the hardware, boot up the VCPU and do some testing
450 */
451static int uvd_v7_0_hw_init(void *handle)
452{
453 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
454 struct amdgpu_ring *ring = &adev->uvd.ring;
455 uint32_t tmp;
456 int i, r;
457
458 r = uvd_v7_0_start(adev);
459 if (r)
460 goto done;
461
462 ring->ready = true;
463 r = amdgpu_ring_test_ring(ring);
464 if (r) {
465 ring->ready = false;
466 goto done;
467 }
468
469 r = amdgpu_ring_alloc(ring, 10);
470 if (r) {
471 DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
472 goto done;
473 }
474
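	/*
	 * Program the semaphore wait/signal timeouts and clear stale timeout
	 * status through the ring itself (PACKET0 register writes); the final
	 * write to UVD_SEMA_CNTL presumably enables the semaphore unit.
	 */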
475 tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
476 mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
477 amdgpu_ring_write(ring, tmp);
478 amdgpu_ring_write(ring, 0xFFFFF);
479
480 tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
481 mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
482 amdgpu_ring_write(ring, tmp);
483 amdgpu_ring_write(ring, 0xFFFFF);
484
485 tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
486 mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
487 amdgpu_ring_write(ring, tmp);
488 amdgpu_ring_write(ring, 0xFFFFF);
489
490 /* Clear timeout status bits */
491 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
492 mmUVD_SEMA_TIMEOUT_STATUS), 0));
493 amdgpu_ring_write(ring, 0x8);
494
495 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
496 mmUVD_SEMA_CNTL), 0));
497 amdgpu_ring_write(ring, 3);
498
499 amdgpu_ring_commit(ring);
500
501 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
502 ring = &adev->uvd.ring_enc[i];
503 ring->ready = true;
504 r = amdgpu_ring_test_ring(ring);
505 if (r) {
506 ring->ready = false;
507 goto done;
508 }
509 }
510
511done:
512 if (!r)
513 DRM_INFO("UVD and UVD ENC initialized successfully.\n");
514
515 return r;
516}
517
518/**
519 * uvd_v7_0_hw_fini - stop the hardware block
520 *
521 * @adev: amdgpu_device pointer
522 *
523 * Stop the UVD block, mark ring as not ready any more
524 */
525static int uvd_v7_0_hw_fini(void *handle)
526{
527 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
528 struct amdgpu_ring *ring = &adev->uvd.ring;
529
530 uvd_v7_0_stop(adev);
531 ring->ready = false;
532
533 return 0;
534}
535
536static int uvd_v7_0_suspend(void *handle)
537{
538 int r;
539 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
540
541 r = uvd_v7_0_hw_fini(adev);
542 if (r)
543 return r;
544
545 /* Skip this for APU for now */
546 if (!(adev->flags & AMD_IS_APU))
547 r = amdgpu_uvd_suspend(adev);
548
549 return r;
550}
551
552static int uvd_v7_0_resume(void *handle)
553{
554 int r;
555 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
556
557 /* Skip this for APU for now */
558 if (!(adev->flags & AMD_IS_APU)) {
559 r = amdgpu_uvd_resume(adev);
560 if (r)
561 return r;
562 }
563 return uvd_v7_0_hw_init(adev);
564}
565
566/**
567 * uvd_v7_0_mc_resume - memory controller programming
568 *
569 * @adev: amdgpu_device pointer
570 *
571 * Let the UVD memory controller know its offsets
572 */
573static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
574{
575 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
576 uint32_t offset;
577
578 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
579 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
580 lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
581 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
582 upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
583 offset = 0;
584 } else {
585 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
586 lower_32_bits(adev->uvd.gpu_addr));
587 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
588 upper_32_bits(adev->uvd.gpu_addr));
589 offset = size;
590 }
591
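	/*
	 * The three VCPU cache windows map, in order, the firmware image,
	 * the heap, and the stack plus per-session context area (sizes taken
	 * from the AMDGPU_UVD_* constants).
	 */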
592 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
593 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
594 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);
595
596 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
597 lower_32_bits(adev->uvd.gpu_addr + offset));
598 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
599 upper_32_bits(adev->uvd.gpu_addr + offset));
600 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
601 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
602
603 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
604 lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
605 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
606 upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
607 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
608 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
609 AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
610
611 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG),
612 adev->gfx.config.gb_addr_config);
613 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG),
614 adev->gfx.config.gb_addr_config);
615 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG),
616 adev->gfx.config.gb_addr_config);
617
618 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
619}
620
621/**
622 * uvd_v7_0_start - start UVD block
623 *
624 * @adev: amdgpu_device pointer
625 *
626 * Setup and start the UVD block
627 */
628static int uvd_v7_0_start(struct amdgpu_device *adev)
629{
630 struct amdgpu_ring *ring = &adev->uvd.ring;
631 uint32_t rb_bufsz, tmp;
632 uint32_t lmi_swap_cntl;
633 uint32_t mp_swap_cntl;
634 int i, j, r;
635
636 /* disable DPG */
637 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
638 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
639
640 /* disable byte swapping */
641 lmi_swap_cntl = 0;
642 mp_swap_cntl = 0;
643
644 uvd_v7_0_mc_resume(adev);
645
646 /* disable clock gating */
647 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), 0,
648 ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
649
650 /* disable interrupt */
651 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
652 ~UVD_MASTINT_EN__VCPU_EN_MASK);
653
654 /* stall UMC and register bus before resetting VCPU */
655 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
656 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
657 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
658 mdelay(1);
659
660 /* put LMI, VCPU, RBC etc... into reset */
661 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
662 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
663 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
664 UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
665 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
666 UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
667 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
668 UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
669 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
670 mdelay(5);
671
672 /* initialize UVD memory controller */
673 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL),
674 (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
675 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
676 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
677 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
678 UVD_LMI_CTRL__REQ_MODE_MASK |
679 0x00100000L);
680
681#ifdef __BIG_ENDIAN
682 /* swap (8 in 32) RB and IB */
683 lmi_swap_cntl = 0xa;
684 mp_swap_cntl = 0;
685#endif
686 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), lmi_swap_cntl);
687 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MP_SWAP_CNTL), mp_swap_cntl);
688
689 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040);
690 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0);
691 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040);
692 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0);
693 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0);
694 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88);
695
696 /* take all subblocks out of reset, except VCPU */
697 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
698 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
699 mdelay(5);
700
701 /* enable VCPU clock */
702 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
703 UVD_VCPU_CNTL__CLK_EN_MASK);
704
705 /* enable UMC */
706 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
707 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
708
709 /* boot up the VCPU */
710 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
711 mdelay(10);
712
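	/*
	 * Wait for the VCPU to come up (UVD_STATUS & 2); if it does not,
	 * toggle the VCPU soft reset and retry, up to 10 times.
	 */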
713 for (i = 0; i < 10; ++i) {
714 uint32_t status;
715
716 for (j = 0; j < 100; ++j) {
717 status = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS));
718 if (status & 2)
719 break;
720 mdelay(10);
721 }
722 r = 0;
723 if (status & 2)
724 break;
725
726 DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
727 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
728 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
729 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
730 mdelay(10);
731 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
732 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
733 mdelay(10);
734 r = -1;
735 }
736
737 if (r) {
738 DRM_ERROR("UVD not responding, giving up!!!\n");
739 return r;
740 }
741 /* enable master interrupt */
742 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
743 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
744 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
745
746 /* clear the bit 4 of UVD_STATUS */
747 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
748 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
749
750 /* force RBC into idle state */
751 rb_bufsz = order_base_2(ring->ring_size);
752 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
753 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
754 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
755 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
756 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
757 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
758 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
759
760 /* set the write pointer delay */
761 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0);
762
763 /* set the wb address */
764 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR),
765 (upper_32_bits(ring->gpu_addr) >> 2));
766
767 /* program the RB_BASE for the ring buffer */
768 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
769 lower_32_bits(ring->gpu_addr));
770 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
771 upper_32_bits(ring->gpu_addr));
772
773 /* Initialize the ring buffer's read and write pointers */
774 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR), 0);
775
776 ring->wptr = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR));
777 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR),
778 lower_32_bits(ring->wptr));
779
780 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
781 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
782
783 ring = &adev->uvd.ring_enc[0];
784 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_RPTR), lower_32_bits(ring->wptr));
785 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR), lower_32_bits(ring->wptr));
786 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
787 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
788 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
789
790 ring = &adev->uvd.ring_enc[1];
791 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_RPTR2), lower_32_bits(ring->wptr));
792 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR2), lower_32_bits(ring->wptr));
793 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO2), ring->gpu_addr);
794 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI2), upper_32_bits(ring->gpu_addr));
795 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE2), ring->ring_size / 4);
796
797 return 0;
798}
799
800/**
801 * uvd_v7_0_stop - stop UVD block
802 *
803 * @adev: amdgpu_device pointer
804 *
805 * stop the UVD block
806 */
807static void uvd_v7_0_stop(struct amdgpu_device *adev)
808{
809 /* force RBC into idle state */
810 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0x11010101);
811
812 /* Stall UMC and register bus before resetting VCPU */
813 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
814 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
815 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
816 mdelay(1);
817
818 /* put VCPU into reset */
819 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
820 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
821 mdelay(5);
822
823 /* disable VCPU clock */
824 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0x0);
825
826 /* Unstall UMC and register bus */
827 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
828 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
829}
830
831/**
832 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
833 *
834 * @ring: amdgpu_ring pointer
835 * @fence: fence to emit
836 *
837 * Write a fence and a trap command to the ring.
838 */
839static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
840 unsigned flags)
841{
842 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
843
844 amdgpu_ring_write(ring,
845 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
846 amdgpu_ring_write(ring, seq);
847 amdgpu_ring_write(ring,
848 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
849 amdgpu_ring_write(ring, addr & 0xffffffff);
850 amdgpu_ring_write(ring,
851 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
852 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
853 amdgpu_ring_write(ring,
854 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
855 amdgpu_ring_write(ring, 0);
856
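	/*
	 * The trailing GPCOM sequence (command 2) is the trap part described
	 * in the function comment; the first sequence (command 0) writes the
	 * fence value above.
	 */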
857 amdgpu_ring_write(ring,
858 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
859 amdgpu_ring_write(ring, 0);
860 amdgpu_ring_write(ring,
861 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
862 amdgpu_ring_write(ring, 0);
863 amdgpu_ring_write(ring,
864 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
865 amdgpu_ring_write(ring, 2);
866}
867
868/**
869 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
870 *
871 * @ring: amdgpu_ring pointer
872 * @fence: fence to emit
873 *
874 * Write an enc fence and a trap command to the ring.
875 */
876static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
877 u64 seq, unsigned flags)
878{
879 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
880
881 amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
882 amdgpu_ring_write(ring, addr);
883 amdgpu_ring_write(ring, upper_32_bits(addr));
884 amdgpu_ring_write(ring, seq);
885 amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
886}
887
888/**
889 * uvd_v7_0_ring_emit_hdp_flush - emit an hdp flush
890 *
891 * @ring: amdgpu_ring pointer
892 *
893 * Emits an hdp flush.
894 */
895static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
896{
897 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(NBIF, 0,
898 mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0));
899 amdgpu_ring_write(ring, 0);
900}
901
902/**
903 * uvd_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate
904 *
905 * @ring: amdgpu_ring pointer
906 *
907 * Emits an hdp invalidate.
908 */
909static void uvd_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
910{
911 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 0));
912 amdgpu_ring_write(ring, 1);
913}
914
915/**
916 * uvd_v7_0_ring_test_ring - register write test
917 *
918 * @ring: amdgpu_ring pointer
919 *
920 * Test if we can successfully write to the context register
921 */
922static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
923{
924 struct amdgpu_device *adev = ring->adev;
925 uint32_t tmp = 0;
926 unsigned i;
927 int r;
928
929 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
930 r = amdgpu_ring_alloc(ring, 3);
931 if (r) {
932 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
933 ring->idx, r);
934 return r;
935 }
936 amdgpu_ring_write(ring,
937 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
938 amdgpu_ring_write(ring, 0xDEADBEEF);
939 amdgpu_ring_commit(ring);
940 for (i = 0; i < adev->usec_timeout; i++) {
941 tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
942 if (tmp == 0xDEADBEEF)
943 break;
944 DRM_UDELAY(1);
945 }
946
947 if (i < adev->usec_timeout) {
948 DRM_INFO("ring test on %d succeeded in %d usecs\n",
949 ring->idx, i);
950 } else {
951 DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
952 ring->idx, tmp);
953 r = -EINVAL;
954 }
955 return r;
956}
957
958/**
959 * uvd_v7_0_ring_emit_ib - execute indirect buffer
960 *
961 * @ring: amdgpu_ring pointer
962 * @ib: indirect buffer to execute
963 *
964 * Write ring commands to execute the indirect buffer
965 */
966static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
967 struct amdgpu_ib *ib,
968 unsigned vm_id, bool ctx_switch)
969{
970 amdgpu_ring_write(ring,
971 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
972 amdgpu_ring_write(ring, vm_id);
973
974 amdgpu_ring_write(ring,
975 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
976 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
977 amdgpu_ring_write(ring,
978 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
979 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
980 amdgpu_ring_write(ring,
981 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
982 amdgpu_ring_write(ring, ib->length_dw);
983}
984
985/**
986 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
987 *
988 * @ring: amdgpu_ring pointer
989 * @ib: indirect buffer to execute
990 *
991 * Write enc ring commands to execute the indirect buffer
992 */
993static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
994 struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
995{
996 amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
997 amdgpu_ring_write(ring, vm_id);
998 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
999 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1000 amdgpu_ring_write(ring, ib->length_dw);
1001}
1002
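/*
 * Helpers for register access from the UVD VCPU: GPCOM_VCPU_DATA0 carries the
 * register offset in bytes, GPCOM_VCPU_DATA1 the value, and GP_SCRATCH8 the
 * compare mask for waits; command 8 performs the write and command 12 the
 * masked wait (as used by the VM flush below).
 */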
1003static void uvd_v7_0_vm_reg_write(struct amdgpu_ring *ring,
1004 uint32_t data0, uint32_t data1)
1005{
1006 amdgpu_ring_write(ring,
1007 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1008 amdgpu_ring_write(ring, data0);
1009 amdgpu_ring_write(ring,
1010 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1011 amdgpu_ring_write(ring, data1);
1012 amdgpu_ring_write(ring,
1013 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1014 amdgpu_ring_write(ring, 8);
1015}
1016
1017static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
1018 uint32_t data0, uint32_t data1, uint32_t mask)
1019{
1020 amdgpu_ring_write(ring,
1021 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1022 amdgpu_ring_write(ring, data0);
1023 amdgpu_ring_write(ring,
1024 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1025 amdgpu_ring_write(ring, data1);
1026 amdgpu_ring_write(ring,
1027 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
1028 amdgpu_ring_write(ring, mask);
1029 amdgpu_ring_write(ring,
1030 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1031 amdgpu_ring_write(ring, 12);
1032}
1033
1034static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1035 unsigned vm_id, uint64_t pd_addr)
1036{
1037 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
1038 uint32_t data0, data1, mask;
1039 unsigned eng = ring->idx;
1040 unsigned i;
1041
1042 pd_addr = pd_addr | 0x1; /* valid bit */
1043 /* for now, only the physical base address of the PDE and the valid bit are used */
1044 BUG_ON(pd_addr & 0xFFFF00000000003EULL);
1045
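	/*
	 * Flush on every VMHUB: write the new page directory address into the
	 * per-VMID CTX0 page table base registers, read it back to make sure
	 * the write landed, then request a TLB invalidation for this VMID and
	 * wait for the hub to acknowledge it.
	 */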
1046 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
1047 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
1048
1049 data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
1050 data1 = upper_32_bits(pd_addr);
1051 uvd_v7_0_vm_reg_write(ring, data0, data1);
1052
1053 data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
1054 data1 = lower_32_bits(pd_addr);
1055 uvd_v7_0_vm_reg_write(ring, data0, data1);
1056
1057 data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
1058 data1 = lower_32_bits(pd_addr);
1059 mask = 0xffffffff;
1060 uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
1061
1062 /* flush TLB */
1063 data0 = (hub->vm_inv_eng0_req + eng) << 2;
1064 data1 = req;
1065 uvd_v7_0_vm_reg_write(ring, data0, data1);
1066
1067 /* wait for flush */
1068 data0 = (hub->vm_inv_eng0_ack + eng) << 2;
1069 data1 = 1 << vm_id;
1070 mask = 1 << vm_id;
1071 uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
1072 }
1073}
1074
1075static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1076{
1077 amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1078}
1079
1080static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1081 unsigned int vm_id, uint64_t pd_addr)
1082{
1083 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
1084 unsigned eng = ring->idx;
1085 unsigned i;
1086
1087 pd_addr = pd_addr | 0x1; /* valid bit */
1088 /* for now, only the physical base address of the PDE and the valid bit are used */
1089 BUG_ON(pd_addr & 0xFFFF00000000003EULL);
1090
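	/*
	 * Same per-VMHUB sequence as on the decode ring, expressed with the
	 * HEVC ENC REG_WRITE/REG_WAIT commands instead of GPCOM packets.
	 */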
1091 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
1092 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
1093
1094 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1095 amdgpu_ring_write(ring,
1096 (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
1097 amdgpu_ring_write(ring, upper_32_bits(pd_addr));
1098
1099 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1100 amdgpu_ring_write(ring,
1101 (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
1102 amdgpu_ring_write(ring, lower_32_bits(pd_addr));
1103
1104 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1105 amdgpu_ring_write(ring,
1106 (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
1107 amdgpu_ring_write(ring, 0xffffffff);
1108 amdgpu_ring_write(ring, lower_32_bits(pd_addr));
1109
1110 /* flush TLB */
1111 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1112 amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
1113 amdgpu_ring_write(ring, req);
1114
1115 /* wait for flush */
1116 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1117 amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
1118 amdgpu_ring_write(ring, 1 << vm_id);
1119 amdgpu_ring_write(ring, 1 << vm_id);
1120 }
1121}
1122
1123#if 0
1124static bool uvd_v7_0_is_idle(void *handle)
1125{
1126 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1127
1128 return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
1129}
1130
1131static int uvd_v7_0_wait_for_idle(void *handle)
1132{
1133 unsigned i;
1134 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1135
1136 for (i = 0; i < adev->usec_timeout; i++) {
1137 if (uvd_v7_0_is_idle(handle))
1138 return 0;
1139 }
1140 return -ETIMEDOUT;
1141}
1142
1143#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
1144static bool uvd_v7_0_check_soft_reset(void *handle)
1145{
1146 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1147 u32 srbm_soft_reset = 0;
1148 u32 tmp = RREG32(mmSRBM_STATUS);
1149
1150 if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
1151 REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
1152 (RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS) &
1153 AMDGPU_UVD_STATUS_BUSY_MASK)))
1154 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1155 SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
1156
1157 if (srbm_soft_reset) {
1158 adev->uvd.srbm_soft_reset = srbm_soft_reset;
1159 return true;
1160 } else {
1161 adev->uvd.srbm_soft_reset = 0;
1162 return false;
1163 }
1164}
1165
1166static int uvd_v7_0_pre_soft_reset(void *handle)
1167{
1168 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1169
1170 if (!adev->uvd.srbm_soft_reset)
1171 return 0;
1172
1173 uvd_v7_0_stop(adev);
1174 return 0;
1175}
1176
1177static int uvd_v7_0_soft_reset(void *handle)
1178{
1179 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1180 u32 srbm_soft_reset;
1181
1182 if (!adev->uvd.srbm_soft_reset)
1183 return 0;
1184 srbm_soft_reset = adev->uvd.srbm_soft_reset;
1185
1186 if (srbm_soft_reset) {
1187 u32 tmp;
1188
1189 tmp = RREG32(mmSRBM_SOFT_RESET);
1190 tmp |= srbm_soft_reset;
1191 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1192 WREG32(mmSRBM_SOFT_RESET, tmp);
1193 tmp = RREG32(mmSRBM_SOFT_RESET);
1194
1195 udelay(50);
1196
1197 tmp &= ~srbm_soft_reset;
1198 WREG32(mmSRBM_SOFT_RESET, tmp);
1199 tmp = RREG32(mmSRBM_SOFT_RESET);
1200
1201 /* Wait a little for things to settle down */
1202 udelay(50);
1203 }
1204
1205 return 0;
1206}
1207
1208static int uvd_v7_0_post_soft_reset(void *handle)
1209{
1210 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1211
1212 if (!adev->uvd.srbm_soft_reset)
1213 return 0;
1214
1215 mdelay(5);
1216
1217 return uvd_v7_0_start(adev);
1218}
1219#endif
1220
1221static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
1222 struct amdgpu_irq_src *source,
1223 unsigned type,
1224 enum amdgpu_interrupt_state state)
1225{
1226 /* TODO */
1227 return 0;
1228}
1229
1230static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
1231 struct amdgpu_irq_src *source,
1232 struct amdgpu_iv_entry *entry)
1233{
1234 DRM_DEBUG("IH: UVD TRAP\n");
1235 switch (entry->src_id) {
1236 case 124:
1237 amdgpu_fence_process(&adev->uvd.ring);
1238 break;
1239 case 119:
1240 amdgpu_fence_process(&adev->uvd.ring_enc[0]);
1241 break;
1242 case 120:
1243 amdgpu_fence_process(&adev->uvd.ring_enc[1]);
1244 break;
1245 default:
1246 DRM_ERROR("Unhandled interrupt: %d %d\n",
1247 entry->src_id, entry->src_data[0]);
1248 break;
1249 }
1250
1251 return 0;
1252}
1253
1254#if 0
1255static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
1256{
1257 uint32_t data, data1, data2, suvd_flags;
1258
1259 data = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL));
1260 data1 = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_GATE));
1261 data2 = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_CTRL));
1262
1263 data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1264 UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1265
1266 suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1267 UVD_SUVD_CGC_GATE__SIT_MASK |
1268 UVD_SUVD_CGC_GATE__SMP_MASK |
1269 UVD_SUVD_CGC_GATE__SCM_MASK |
1270 UVD_SUVD_CGC_GATE__SDB_MASK;
1271
1272 data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1273 (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1274 (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1275
1276 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1277 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1278 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1279 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1280 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1281 UVD_CGC_CTRL__SYS_MODE_MASK |
1282 UVD_CGC_CTRL__UDEC_MODE_MASK |
1283 UVD_CGC_CTRL__MPEG2_MODE_MASK |
1284 UVD_CGC_CTRL__REGS_MODE_MASK |
1285 UVD_CGC_CTRL__RBC_MODE_MASK |
1286 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1287 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1288 UVD_CGC_CTRL__IDCT_MODE_MASK |
1289 UVD_CGC_CTRL__MPRD_MODE_MASK |
1290 UVD_CGC_CTRL__MPC_MODE_MASK |
1291 UVD_CGC_CTRL__LBSI_MODE_MASK |
1292 UVD_CGC_CTRL__LRBBM_MODE_MASK |
1293 UVD_CGC_CTRL__WCB_MODE_MASK |
1294 UVD_CGC_CTRL__VCPU_MODE_MASK |
1295 UVD_CGC_CTRL__JPEG_MODE_MASK |
1296 UVD_CGC_CTRL__JPEG2_MODE_MASK |
1297 UVD_CGC_CTRL__SCPU_MODE_MASK);
1298 data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1299 UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1300 UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1301 UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1302 UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1303 data1 |= suvd_flags;
1304
1305 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), data);
1306 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_GATE), 0);
1307 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_GATE), data1);
1308 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_CTRL), data2);
1309}
1310
1311static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
1312{
1313 uint32_t data, data1, cgc_flags, suvd_flags;
1314
1315 data = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_GATE));
1316 data1 = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_GATE));
1317
1318 cgc_flags = UVD_CGC_GATE__SYS_MASK |
1319 UVD_CGC_GATE__UDEC_MASK |
1320 UVD_CGC_GATE__MPEG2_MASK |
1321 UVD_CGC_GATE__RBC_MASK |
1322 UVD_CGC_GATE__LMI_MC_MASK |
1323 UVD_CGC_GATE__IDCT_MASK |
1324 UVD_CGC_GATE__MPRD_MASK |
1325 UVD_CGC_GATE__MPC_MASK |
1326 UVD_CGC_GATE__LBSI_MASK |
1327 UVD_CGC_GATE__LRBBM_MASK |
1328 UVD_CGC_GATE__UDEC_RE_MASK |
1329 UVD_CGC_GATE__UDEC_CM_MASK |
1330 UVD_CGC_GATE__UDEC_IT_MASK |
1331 UVD_CGC_GATE__UDEC_DB_MASK |
1332 UVD_CGC_GATE__UDEC_MP_MASK |
1333 UVD_CGC_GATE__WCB_MASK |
1334 UVD_CGC_GATE__VCPU_MASK |
1335 UVD_CGC_GATE__SCPU_MASK |
1336 UVD_CGC_GATE__JPEG_MASK |
1337 UVD_CGC_GATE__JPEG2_MASK;
1338
1339 suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1340 UVD_SUVD_CGC_GATE__SIT_MASK |
1341 UVD_SUVD_CGC_GATE__SMP_MASK |
1342 UVD_SUVD_CGC_GATE__SCM_MASK |
1343 UVD_SUVD_CGC_GATE__SDB_MASK;
1344
1345 data |= cgc_flags;
1346 data1 |= suvd_flags;
1347
1348 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_GATE), data);
1349 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_GATE), data1);
1350}
1351
1352static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
1353{
1354 u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
1355
1356 if (enable)
1357 tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1358 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1359 else
1360 tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1361 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1362
1363 WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
1364}
1365
1366
1367static int uvd_v7_0_set_clockgating_state(void *handle,
1368 enum amd_clockgating_state state)
1369{
1370 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1371 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
1372
1373 uvd_v7_0_set_bypass_mode(adev, enable);
1374
1375 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
1376 return 0;
1377
1378 if (enable) {
1379 /* disable HW gating and enable Sw gating */
1380 uvd_v7_0_set_sw_clock_gating(adev);
1381 } else {
1382 /* wait for STATUS to clear */
1383 if (uvd_v7_0_wait_for_idle(handle))
1384 return -EBUSY;
1385
1386 /* enable HW gates because UVD is idle */
1387 /* uvd_v7_0_set_hw_clock_gating(adev); */
1388 }
1389
1390 return 0;
1391}
1392
1393static int uvd_v7_0_set_powergating_state(void *handle,
1394 enum amd_powergating_state state)
1395{
1396 /* This doesn't actually powergate the UVD block.
1397 * That's done in the dpm code via the SMC. This
1398 * just re-inits the block as necessary. The actual
1399 * gating still happens in the dpm code. We should
1400 * revisit this when there is a cleaner line between
1401 * the smc and the hw blocks
1402 */
1403 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1404
1405 if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1406 return 0;
1407
1408 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), UVD_POWER_STATUS__UVD_PG_EN_MASK);
1409
1410 if (state == AMD_PG_STATE_GATE) {
1411 uvd_v7_0_stop(adev);
1412 return 0;
1413 } else {
1414 return uvd_v7_0_start(adev);
1415 }
1416}
1417#endif
1418
1419static int uvd_v7_0_set_clockgating_state(void *handle,
1420 enum amd_clockgating_state state)
1421{
1422 /* needed for driver unload */
1423 return 0;
1424}
1425
1426const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
1427 .name = "uvd_v7_0",
1428 .early_init = uvd_v7_0_early_init,
1429 .late_init = NULL,
1430 .sw_init = uvd_v7_0_sw_init,
1431 .sw_fini = uvd_v7_0_sw_fini,
1432 .hw_init = uvd_v7_0_hw_init,
1433 .hw_fini = uvd_v7_0_hw_fini,
1434 .suspend = uvd_v7_0_suspend,
1435 .resume = uvd_v7_0_resume,
1436 .is_idle = NULL /* uvd_v7_0_is_idle */,
1437 .wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
1438 .check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
1439 .pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
1440 .soft_reset = NULL /* uvd_v7_0_soft_reset */,
1441 .post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
1442 .set_clockgating_state = uvd_v7_0_set_clockgating_state,
1443 .set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
1444};
1445
1446static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1447 .type = AMDGPU_RING_TYPE_UVD,
1448 .align_mask = 0xf,
1449 .nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
1450 .support_64bit_ptrs = false,
1451 .vmhub = AMDGPU_MMHUB,
1452 .get_rptr = uvd_v7_0_ring_get_rptr,
1453 .get_wptr = uvd_v7_0_ring_get_wptr,
1454 .set_wptr = uvd_v7_0_ring_set_wptr,
1455 .emit_frame_size =
1456 2 + /* uvd_v7_0_ring_emit_hdp_flush */
1457 2 + /* uvd_v7_0_ring_emit_hdp_invalidate */
1458 34 * AMDGPU_MAX_VMHUBS + /* uvd_v7_0_ring_emit_vm_flush */
1459 14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
1460 .emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
1461 .emit_ib = uvd_v7_0_ring_emit_ib,
1462 .emit_fence = uvd_v7_0_ring_emit_fence,
1463 .emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
1464 .emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
1465 .emit_hdp_invalidate = uvd_v7_0_ring_emit_hdp_invalidate,
1466 .test_ring = uvd_v7_0_ring_test_ring,
1467 .test_ib = amdgpu_uvd_ring_test_ib,
1468 .insert_nop = amdgpu_ring_insert_nop,
1469 .pad_ib = amdgpu_ring_generic_pad_ib,
1470 .begin_use = amdgpu_uvd_ring_begin_use,
1471 .end_use = amdgpu_uvd_ring_end_use,
1472};
1473
1474static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1475 .type = AMDGPU_RING_TYPE_UVD_ENC,
1476 .align_mask = 0x3f,
1477 .nop = HEVC_ENC_CMD_NO_OP,
1478 .support_64bit_ptrs = false,
1479 .vmhub = AMDGPU_MMHUB,
1480 .get_rptr = uvd_v7_0_enc_ring_get_rptr,
1481 .get_wptr = uvd_v7_0_enc_ring_get_wptr,
1482 .set_wptr = uvd_v7_0_enc_ring_set_wptr,
1483 .emit_frame_size =
1484 17 * AMDGPU_MAX_VMHUBS + /* uvd_v7_0_enc_ring_emit_vm_flush */
1485 5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1486 1, /* uvd_v7_0_enc_ring_insert_end */
1487 .emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
1488 .emit_ib = uvd_v7_0_enc_ring_emit_ib,
1489 .emit_fence = uvd_v7_0_enc_ring_emit_fence,
1490 .emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
1491 .test_ring = uvd_v7_0_enc_ring_test_ring,
1492 .test_ib = uvd_v7_0_enc_ring_test_ib,
1493 .insert_nop = amdgpu_ring_insert_nop,
1494 .insert_end = uvd_v7_0_enc_ring_insert_end,
1495 .pad_ib = amdgpu_ring_generic_pad_ib,
1496 .begin_use = amdgpu_uvd_ring_begin_use,
1497 .end_use = amdgpu_uvd_ring_end_use,
1498};
1499
1500static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
1501{
1502 adev->uvd.ring.funcs = &uvd_v7_0_ring_vm_funcs;
1503 DRM_INFO("UVD is enabled in VM mode\n");
1504}
1505
1506static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1507{
1508 int i;
1509
1510 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
1511 adev->uvd.ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
1512
1513 DRM_INFO("UVD ENC is enabled in VM mode\n");
1514}
1515
1516static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
1517 .set = uvd_v7_0_set_interrupt_state,
1518 .process = uvd_v7_0_process_interrupt,
1519};
1520
1521static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1522{
1523 adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
1524 adev->uvd.irq.funcs = &uvd_v7_0_irq_funcs;
1525}
1526
1527const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
1528{
1529 .type = AMD_IP_BLOCK_TYPE_UVD,
1530 .major = 7,
1531 .minor = 0,
1532 .rev = 0,
1533 .funcs = &uvd_v7_0_ip_funcs,
1534};