drm/amdgpu: add VCN2.5 VCPU start and stop
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

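/**
 * amdgpu_vcn_sw_init - software init for VCN
 *
 * @adev: amdgpu_device pointer
 *
 * Select and load the VCN firmware for the current ASIC, parse the firmware
 * version word and allocate the VCPU buffer object in VRAM, plus the
 * indirect SRAM BO when dynamic power gating (DPG) is used.
 */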
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->pdev->device == 0x15d8)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case CHIP_NAVI10:
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI14:
		fw_name = FIRMWARE_NAVI14;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 are the encode major version and are non-zero in the new
	 * naming convention. In the old naming convention this field is part
	 * of the version minor and DRM_DISABLED_FLAG. Since the latest version
	 * minor is 0x5B and DRM_DISABLED_FLAG is zero in the old convention,
	 * this field is always zero so far. These four bits are therefore used
	 * to tell which naming convention is present.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			 enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
			 version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	if (adev->vcn.indirect_sram) {
		r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.dpg_sram_bo,
			    &adev->vcn.dpg_sram_gpu_addr, &adev->vcn.dpg_sram_cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate DPG bo\n", r);
			return r;
		}
	}

	return 0;
}

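/**
 * amdgpu_vcn_sw_fini - software fini for VCN
 *
 * @adev: amdgpu_device pointer
 *
 * Free the buffer objects allocated at init time, tear down the decode,
 * encode and JPEG rings and release the firmware.
 */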
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kvfree(adev->vcn.saved_bo);

	if (adev->vcn.indirect_sram) {
		amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
				      &adev->vcn.dpg_sram_gpu_addr,
				      (void **)&adev->vcn.dpg_sram_cpu_addr);
	}

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	amdgpu_ring_fini(&adev->vcn.ring_jpeg);

	release_firmware(adev->vcn.fw);

	return 0;
}

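/**
 * amdgpu_vcn_suspend - VCN suspend
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel the idle work and save the VCPU BO contents to system memory so
 * they can be restored on resume.
 */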
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}

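/**
 * amdgpu_vcn_resume - VCN resume
 *
 * @adev: amdgpu_device pointer
 *
 * Restore the VCPU BO from the copy saved at suspend time. If no saved copy
 * exists, reload the firmware image instead (unless PSP loads it) and clear
 * the rest of the buffer.
 */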
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kvfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
				    le32_to_cpu(hdr->ucode_size_bytes));
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		memset_io(ptr, 0, size);
	}

	return 0;
}

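/**
 * amdgpu_vcn_idle_work_handler - power off VCN when idle
 *
 * @work: the delayed idle work item
 *
 * Count the fences still outstanding on the VCN rings. If none remain,
 * gate VCN power; otherwise update the DPG pause state and re-arm the
 * work item for another VCN_IDLE_TIMEOUT.
 */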
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0;
	unsigned int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}

	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->asic_type < CHIP_NAVI10 && adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

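/**
 * amdgpu_vcn_ring_begin_use - power up VCN before using a ring
 *
 * @ring: ring about to be used
 *
 * Cancel the pending idle work; if none was pending, the block may have
 * been gated, so ungate VCN power. In DPG mode also recompute the pause
 * state, forcing a pause for the ring type being submitted to.
 */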
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->asic_type < CHIP_NAVI10 && adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0;
		unsigned int i;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
		}
		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			new_state.jpeg = VCN_DPG_STATE__PAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}
}

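/**
 * amdgpu_vcn_ring_end_use - schedule idle handling after ring use
 *
 * @ring: ring that was used
 *
 * Re-arm the idle work so VCN is powered down again after VCN_IDLE_TIMEOUT
 * of inactivity.
 */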
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

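/**
 * amdgpu_vcn_dec_ring_test_ring - register write/read test on the decode ring
 *
 * @ring: ring to test
 *
 * Write 0xCAFEDEAD to the scratch register, ask the ring to overwrite it
 * with 0xDEADBEEF and poll until the new value shows up or the timeout hits.
 */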
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

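/**
 * amdgpu_vcn_dec_send_msg - submit a decoder message buffer
 *
 * @ring: decode ring
 * @bo: reserved buffer object holding the message
 * @fence: optional fence to return to the caller
 *
 * Build a small IB that points the decoder at the message BO and submit it
 * directly. The BO is fenced and released on both the success and error
 * paths.
 */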
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

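/**
 * amdgpu_vcn_dec_get_create_msg - send a decoder session create message
 *
 * @ring: decode ring
 * @handle: session handle to create
 * @fence: optional fence to return to the caller
 *
 * Fill a buffer object with a create message for @handle and send it.
 */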
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

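/**
 * amdgpu_vcn_dec_get_destroy_msg - send a decoder session destroy message
 *
 * @ring: decode ring
 * @handle: session handle to destroy
 * @fence: optional fence to return to the caller
 *
 * Fill a buffer object with a destroy message for @handle and send it.
 */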
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

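/**
 * amdgpu_vcn_dec_ring_test_ib - indirect buffer test on the decode ring
 *
 * @ring: ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Send a create message followed by a destroy message and wait for the
 * destroy fence to signal.
 */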
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

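/**
 * amdgpu_vcn_enc_ring_test_ring - test that an encode ring makes progress
 *
 * @ring: ring to test
 *
 * Submit an END command and poll until the read pointer moves past the
 * recorded position.
 */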
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

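/**
 * amdgpu_vcn_enc_get_create_msg - open a dummy encoder session
 *
 * @ring: encode ring
 * @handle: session handle to create
 * @fence: optional fence to return to the caller
 *
 * Build and directly submit an IB carrying session info, task info and an
 * initialize op for @handle.
 */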
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

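/**
 * amdgpu_vcn_enc_get_destroy_msg - close a dummy encoder session
 *
 * @ring: encode ring
 * @handle: session handle to destroy
 * @fence: optional fence to return to the caller
 *
 * Build and directly submit an IB carrying session info, task info and a
 * close-session op for @handle.
 */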
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

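/**
 * amdgpu_vcn_enc_ring_test_ib - indirect buffer test on an encode ring
 *
 * @ring: ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Open and close a dummy encode session and wait for the close to signal.
 */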
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

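/**
 * amdgpu_vcn_jpeg_ring_test_ring - register write/read test on the JPEG ring
 *
 * @ring: ring to test
 *
 * Write 0xCAFEDEAD to the JPEG pitch register, have the ring replace it
 * with 0xDEADBEEF and poll for the new value.
 */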
int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.external.jpeg_pitch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.jpeg_pitch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

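/**
 * amdgpu_vcn_jpeg_set_reg - write a register from the JPEG ring
 *
 * @ring: JPEG ring
 * @handle: session handle (unused here)
 * @fence: optional fence to return to the caller
 *
 * Directly submit an IB that writes 0xDEADBEEF to the JPEG pitch register,
 * padded with type-6 NOP packets.
 */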
static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	const unsigned ib_size_dw = 16;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	ib->ptr[0] = PACKETJ(adev->vcn.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
	ib->ptr[1] = 0xDEADBEEF;
	for (i = 2; i < 16; i += 2) {
		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

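/**
 * amdgpu_vcn_jpeg_ring_test_ib - indirect buffer test on the JPEG ring
 *
 * @ring: ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Submit the register-write IB, wait for its fence, then poll the JPEG
 * pitch register until the written value appears.
 */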
int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	struct dma_fence *fence = NULL;
	long r = 0;

	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto error;
	} else if (r < 0) {
		goto error;
	} else {
		r = 0;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.jpeg_pitch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	dma_fence_put(fence);
error:
	return r;
}