drm/amdgpu/vcn: finish delay work before release resources
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

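/**
 * amdgpu_vcn_sw_init - allocate VCN software resources
 *
 * @adev: amdgpu_device pointer
 *
 * Pick the VCN firmware for the current ASIC, request and validate it,
 * log its version, and allocate the VCPU buffer object of every VCN
 * instance (plus the indirect SRAM buffer when DPG mode uses it).
 */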
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int i, r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->pdev->device == 0x15d8)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case CHIP_ARCTURUS:
		fw_name = FIRMWARE_ARCTURUS;
		break;
	case CHIP_RENOIR:
		fw_name = FIRMWARE_RENOIR;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI10:
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI14:
		fw_name = FIRMWARE_NAVI14;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI12:
		fw_name = FIRMWARE_NAVI12;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 hold the encode major version and are non-zero in the new
	 * naming convention. In the old naming convention this field is part of
	 * version minor and DRM_DISABLED_FLAG. Since the latest version minor is
	 * 0x5B and DRM_DISABLED_FLAG is zero in the old naming convention, this
	 * field is always zero there so far. These four bits are therefore used
	 * to tell which naming convention is present.
	 */
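	/* Illustrative decode of a hypothetical (made-up) version word under
	 * the new convention, matching the shifts below: ucode_version =
	 * 0x01234567 gives VEP 0x0 (bits 28-31), DEC 0x1 (bits 24-27),
	 * ENC major 0x2 (bits 20-23, non-zero, so the new convention applies),
	 * ENC minor 0x34 (bits 12-19) and firmware revision 0x567 (bits 0-11).
	 */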
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
			enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
			version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
				&adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
			return r;
		}
	}

	if (adev->vcn.indirect_sram) {
		r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.dpg_sram_bo,
				&adev->vcn.dpg_sram_gpu_addr, &adev->vcn.dpg_sram_cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate DPG bo\n", r);
			return r;
		}
	}

	return 0;
}

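/**
 * amdgpu_vcn_sw_fini - free VCN software resources
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel the pending idle work, then free the DPG SRAM buffer, the saved
 * firmware image, the VCPU buffer objects and the rings of every VCN
 * instance, and finally release the firmware.
 */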
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.indirect_sram) {
		amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
				      &adev->vcn.dpg_sram_gpu_addr,
				      (void **)&adev->vcn.dpg_sram_cpu_addr);
	}

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		kvfree(adev->vcn.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
					  &adev->vcn.inst[j].gpu_addr,
					  (void **)&adev->vcn.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_jpeg);
	}

	release_firmware(adev->vcn.fw);

	return 0;
}

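/**
 * amdgpu_vcn_suspend - VCN suspend handling
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel the pending idle work and save the contents of each instance's
 * VCPU buffer object to system memory so it can be restored on resume.
 */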
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return 0;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vcn.inst[i].saved_bo)
			return -ENOMEM;

		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
	}
	return 0;
}

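/**
 * amdgpu_vcn_resume - VCN resume handling
 *
 * @adev: amdgpu_device pointer
 *
 * Restore the VCPU buffer object of each instance from the image saved at
 * suspend time, or reload the firmware image and clear the rest of the
 * buffer when no saved copy exists.
 */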
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		if (adev->vcn.inst[i].saved_bo != NULL) {
			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
			kvfree(adev->vcn.inst[i].saved_bo);
			adev->vcn.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
		}
	}
	return 0;
}

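/**
 * amdgpu_vcn_idle_work_handler - power off VCN when it has been idle
 *
 * @work: the delayed work item
 *
 * Count the fences still emitted on all VCN rings. If none are pending,
 * gate VCN power (and re-enable GFXOFF); otherwise re-arm the idle timer.
 * In DPG mode the pause state of the firmware-based and JPEG pipes is
 * also updated from the per-instance fence counts.
 */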
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			struct dpg_pause_state new_state;

			if (fence[j])
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

			if (amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg))
				new_state.jpeg = VCN_DPG_STATE__PAUSE;
			else
				new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

			adev->vcn.pause_dpg_mode(adev, &new_state);
		}

		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg);
		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
		fences += fence[j];
	}

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

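/**
 * amdgpu_vcn_ring_begin_use - power up VCN before ring use
 *
 * @ring: the VCN ring about to be used
 *
 * Cancel the pending idle work; when no work was pending (so VCN may have
 * been gated in the meantime), ungate VCN power and disable GFXOFF. In DPG
 * mode the pause state is also refreshed from the pending fences and from
 * the type of ring being used.
 */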
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0;
		unsigned int i;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
		}
		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			new_state.jpeg = VCN_DPG_STATE__PAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}
}

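/**
 * amdgpu_vcn_ring_end_use - schedule power gating after ring use
 *
 * @ring: the VCN ring that was just used
 *
 * Re-arm the idle work so VCN is powered down again once it has been
 * idle for VCN_IDLE_TIMEOUT.
 */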
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

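/**
 * amdgpu_vcn_dec_ring_test_ring - register write and read back test
 *
 * @ring: the VCN decode ring to test
 *
 * Reset the scratch register to 0xCAFEDEAD, submit a packet that writes
 * 0xDEADBEEF to it through the ring and poll until the new value shows
 * up, to verify that the ring is alive.
 */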
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

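/**
 * amdgpu_vcn_dec_send_msg - submit a decoder message buffer
 *
 * @ring: the VCN decode ring
 * @bo: reserved buffer object holding the message
 * @fence: optional fence returned for the submission
 *
 * Build a small IB that points the decoder at the message buffer and
 * submit it directly; the buffer object is fenced, unreserved and
 * dropped before returning.
 */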
add9f9a8 409static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
4c6530fd 410 struct amdgpu_bo *bo,
add9f9a8 411 struct dma_fence **fence)
95d0906f 412{
add9f9a8
CK
413 struct amdgpu_device *adev = ring->adev;
414 struct dma_fence *f = NULL;
95d0906f
LL
415 struct amdgpu_job *job;
416 struct amdgpu_ib *ib;
95d0906f
LL
417 uint64_t addr;
418 int i, r;
419
95d0906f
LL
420 r = amdgpu_job_alloc_with_ib(adev, 64, &job);
421 if (r)
422 goto err;
423
424 ib = &job->ibs[0];
425 addr = amdgpu_bo_gpu_offset(bo);
60a2309e 426 ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
95d0906f 427 ib->ptr[1] = addr;
60a2309e 428 ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
95d0906f 429 ib->ptr[3] = addr >> 32;
60a2309e 430 ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
95d0906f
LL
431 ib->ptr[5] = 0;
432 for (i = 6; i < 16; i += 2) {
60a2309e 433 ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
95d0906f
LL
434 ib->ptr[i+1] = 0;
435 }
436 ib->length_dw = 16;
437
ee913fd9 438 r = amdgpu_job_submit_direct(job, ring, &f);
4c6530fd
LL
439 if (r)
440 goto err_free;
95d0906f 441
add9f9a8
CK
442 amdgpu_bo_fence(bo, f, false);
443 amdgpu_bo_unreserve(bo);
444 amdgpu_bo_unref(&bo);
95d0906f
LL
445
446 if (fence)
447 *fence = dma_fence_get(f);
95d0906f
LL
448 dma_fence_put(f);
449
450 return 0;
451
452err_free:
453 amdgpu_job_free(job);
454
455err:
add9f9a8
CK
456 amdgpu_bo_unreserve(bo);
457 amdgpu_bo_unref(&bo);
95d0906f
LL
458 return r;
459}
460
461static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
462 struct dma_fence **fence)
463{
464 struct amdgpu_device *adev = ring->adev;
add9f9a8 465 struct amdgpu_bo *bo = NULL;
95d0906f
LL
466 uint32_t *msg;
467 int r, i;
468
add9f9a8
CK
469 r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
470 AMDGPU_GEM_DOMAIN_VRAM,
471 &bo, NULL, (void **)&msg);
95d0906f
LL
472 if (r)
473 return r;
474
2d8a425b 475 msg[0] = cpu_to_le32(0x00000028);
3b8f5ab3 476 msg[1] = cpu_to_le32(0x00000038);
2d8a425b 477 msg[2] = cpu_to_le32(0x00000001);
95d0906f 478 msg[3] = cpu_to_le32(0x00000000);
2d8a425b 479 msg[4] = cpu_to_le32(handle);
95d0906f 480 msg[5] = cpu_to_le32(0x00000000);
2d8a425b
LL
481 msg[6] = cpu_to_le32(0x00000001);
482 msg[7] = cpu_to_le32(0x00000028);
3b8f5ab3 483 msg[8] = cpu_to_le32(0x00000010);
95d0906f 484 msg[9] = cpu_to_le32(0x00000000);
2d8a425b
LL
485 msg[10] = cpu_to_le32(0x00000007);
486 msg[11] = cpu_to_le32(0x00000000);
3b8f5ab3
LL
487 msg[12] = cpu_to_le32(0x00000780);
488 msg[13] = cpu_to_le32(0x00000440);
489 for (i = 14; i < 1024; ++i)
95d0906f
LL
490 msg[i] = cpu_to_le32(0x0);
491
4c6530fd 492 return amdgpu_vcn_dec_send_msg(ring, bo, fence);
95d0906f
LL
493}
494
495static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
4c6530fd 496 struct dma_fence **fence)
95d0906f
LL
497{
498 struct amdgpu_device *adev = ring->adev;
add9f9a8 499 struct amdgpu_bo *bo = NULL;
95d0906f
LL
500 uint32_t *msg;
501 int r, i;
502
add9f9a8
CK
503 r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
504 AMDGPU_GEM_DOMAIN_VRAM,
505 &bo, NULL, (void **)&msg);
95d0906f
LL
506 if (r)
507 return r;
508
2d8a425b
LL
509 msg[0] = cpu_to_le32(0x00000028);
510 msg[1] = cpu_to_le32(0x00000018);
511 msg[2] = cpu_to_le32(0x00000000);
512 msg[3] = cpu_to_le32(0x00000002);
513 msg[4] = cpu_to_le32(handle);
514 msg[5] = cpu_to_le32(0x00000000);
515 for (i = 6; i < 1024; ++i)
95d0906f
LL
516 msg[i] = cpu_to_le32(0x0);
517
4c6530fd 518 return amdgpu_vcn_dec_send_msg(ring, bo, fence);
95d0906f
LL
519}
520
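/**
 * amdgpu_vcn_dec_ring_test_ib - test the decode ring with an IB
 *
 * @ring: the VCN decode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Send a session create and a session destroy message through the
 * decoder and wait for the destroy fence to signal.
 */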
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

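/**
 * amdgpu_vcn_enc_ring_test_ring - test the encode ring
 *
 * @ring: the VCN encode ring to test
 *
 * Submit a single END command and poll the read pointer until it moves,
 * to verify that the ring is being fetched by the engine.
 */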
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_bo *bo,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_bo *bo,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

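/**
 * amdgpu_vcn_enc_ring_test_ib - test the encode ring with an IB
 *
 * @ring: the VCN encode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Allocate a scratch buffer, send a session create and a session destroy
 * message through the encoder and wait for the destroy fence to signal.
 */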
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

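/**
 * amdgpu_vcn_jpeg_ring_test_ring - register write and read back test
 *
 * @ring: the VCN JPEG ring to test
 *
 * Reset the JPEG pitch register to 0xCAFEDEAD, write 0xDEADBEEF to it
 * through the ring and poll until the new value shows up, to verify that
 * the ring is alive.
 */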
int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

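/**
 * amdgpu_vcn_jpeg_set_reg - write the JPEG pitch register through an IB
 *
 * @ring: the VCN JPEG ring
 * @handle: session handle (unused)
 * @fence: fence returned for the submission
 *
 * Build a small IB that writes 0xDEADBEEF to the JPEG pitch register and
 * submit it directly; the IB test below polls the register for the value.
 */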
static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	const unsigned ib_size_dw = 16;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	ib->ptr[0] = PACKETJ(adev->vcn.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
	ib->ptr[1] = 0xDEADBEEF;
	for (i = 2; i < 16; i += 2) {
		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

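/**
 * amdgpu_vcn_jpeg_ring_test_ib - test the JPEG ring with an IB
 *
 * @ring: the VCN JPEG ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Submit the register-write IB, wait for its fence and then poll the
 * JPEG pitch register until 0xDEADBEEF shows up.
 */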
int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	struct dma_fence *fence = NULL;
	long r = 0;

	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto error;
	} else if (r < 0) {
		goto error;
	} else {
		r = 0;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	dma_fence_put(fence);

error:
	return r;
}