drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

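/**
 * amdgpu_vcn_sw_init - software init for the VCN block
 * @adev: amdgpu_device pointer
 *
 * Pick the VCN firmware image for the current ASIC, request and validate
 * it, then allocate the VCPU buffer object (stack and context, plus the
 * ucode image itself when the firmware is not loaded through PSP) and,
 * for DPG-capable parts, the indirect SRAM buffer object.
 */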
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->pdev->device == 0x15d8)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case CHIP_NAVI10:
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI14:
		fw_name = FIRMWARE_NAVI14;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 hold the encode major version and are non-zero in the new
	 * naming convention. In the old naming convention this field overlaps
	 * version minor and DRM_DISABLED_FLAG. Since the latest version minor
	 * is 0x5B and DRM_DISABLED_FLAG is zero in the old convention, this
	 * field has always been zero there so far, so these four bits tell
	 * which naming convention is in use.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
			enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
			version_major, version_minor, family_id);
	}
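	/* Worked example with a hypothetical ucode_version of 0x0210900a:
	 * bits 20-23 are 0x1 (non-zero), so the new convention applies and
	 * the fields decode as ENC 1.9 (major 0x1, minor 0x09), DEC 2,
	 * VEP 0, revision 10 (0x00a).
	 */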

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	if (adev->vcn.indirect_sram) {
		r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.dpg_sram_bo,
					    &adev->vcn.dpg_sram_gpu_addr, &adev->vcn.dpg_sram_cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate DPG bo\n", r);
			return r;
		}
	}

	return 0;
}

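/**
 * amdgpu_vcn_sw_fini - software fini for the VCN block
 * @adev: amdgpu_device pointer
 *
 * Free the suspend save buffer, the DPG and VCPU buffer objects, tear
 * down the decode, encode and JPEG rings, and release the firmware image.
 */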
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kvfree(adev->vcn.saved_bo);

	if (adev->vcn.indirect_sram) {
		amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
				      &adev->vcn.dpg_sram_gpu_addr,
				      (void **)&adev->vcn.dpg_sram_cpu_addr);
	}

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	amdgpu_ring_fini(&adev->vcn.ring_jpeg);

	release_firmware(adev->vcn.fw);

	return 0;
}

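/**
 * amdgpu_vcn_suspend - VCN suspend handling
 * @adev: amdgpu_device pointer
 *
 * Cancel any pending idle work and save the contents of the VCPU buffer
 * object to system memory so they survive VRAM loss across suspend.
 */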
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}

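/**
 * amdgpu_vcn_resume - VCN resume handling
 * @adev: amdgpu_device pointer
 *
 * Restore the VCPU buffer object from the saved copy if one exists;
 * otherwise re-copy the ucode image (when not loaded through PSP) and
 * clear the remainder of the buffer.
 */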
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kvfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
				    le32_to_cpu(hdr->ucode_size_bytes));
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		memset_io(ptr, 0, size);
	}

	return 0;
}

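/*
 * Delayed work that runs VCN_IDLE_TIMEOUT after the last ring use: it
 * re-evaluates the DPG pause state from the emitted-fence counts and,
 * once no fences remain on any VCN ring, re-enables gfxoff and power
 * gates the block; otherwise it re-arms itself.
 */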
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0;
	unsigned int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}

	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->asic_type < CHIP_NAVI10 && adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

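/*
 * Called when a VCN ring is about to be used: if no idle work was
 * pending, the block may have been power gated, so disable gfxoff and
 * ungate it, then raise the DPG pause state for the ring type in use.
 */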
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->asic_type < CHIP_NAVI10 && adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0;
		unsigned int i;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
		}
		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			new_state.jpeg = VCN_DPG_STATE__PAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}
}

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

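/*
 * Smoke test for the decode ring: seed the scratch9 register with
 * 0xCAFEDEAD, emit a register write of 0xDEADBEEF through the ring,
 * then poll until the new value reads back or usec_timeout expires.
 */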
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

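/*
 * Submit a decoder message buffer to the ring: the IB programs the
 * data0/data1 registers with the message BO address, kicks the command
 * register and pads the rest with NOPs; the BO is fenced and released
 * once the job has been submitted.
 */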
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together a VCN decoder create msg; the session handle
	 * goes in msg[4], and 0x780 x 0x440 is 1920 x 1088, presumably
	 * the dummy stream dimensions.
	 */
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together a VCN decoder destroy msg for the same handle */
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);

error:
	return r;
}

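/*
 * Smoke test for an encode ring: record the current read pointer, submit
 * a single VCN_ENC_CMD_END and poll until the rptr advances past it.
 */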
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

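/*
 * The encoder test IBs below follow the pattern the inline comments
 * describe: a session-info packet carrying the handle and a dummy
 * address, a task-info packet, and an operation packet (0x08000001
 * initialize / 0x08000002 close session), padded to ib_size_dw.
 */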
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

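/*
 * Smoke test for the JPEG ring, analogous to the decode ring test but
 * using the jpeg_pitch register as scratch.
 */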
int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.external.jpeg_pitch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.jpeg_pitch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	const unsigned ib_size_dw = 16;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	ib->ptr[0] = PACKETJ(adev->vcn.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
	ib->ptr[1] = 0xDEADBEEF;
	for (i = 2; i < 16; i += 2) {
		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	struct dma_fence *fence = NULL;
	long r = 0;

	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto error;
	} else if (r < 0) {
		goto error;
	} else {
		r = 0;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.jpeg_pitch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error:
	/* dma_fence_put() tolerates NULL, so the error paths (including the
	 * wait timeout, which previously leaked the fence) drop it here too.
	 */
	dma_fence_put(fence);
	return r;
}