/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

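/* One-time software setup: request and validate the VCN firmware, report its
 * version, and allocate the VCPU buffer object (stack, heap and session area,
 * plus the firmware image itself when it is not loaded through PSP).
 */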
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		fw_name = FIRMWARE_RAVEN;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 hold the encode major version and are non-zero in the new
	 * naming convention. In the old naming convention this field is part of
	 * the version minor and DRM_DISABLED_FLAG. Since the latest version minor
	 * is 0x5B and DRM_DISABLED_FLAG is zero in the old convention, this field
	 * has always been zero there so far. These four bits therefore tell which
	 * naming convention is in use.
	 */
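	/* Illustrative decode under the new convention (hypothetical value):
	 * ucode_version 0x22153007 -> VEP 2, DEC 2, ENC 1.83, revision 7.
	 */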
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
			enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
			version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
		  + AMDGPU_VCN_SESSION_SIZE * 40;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	return 0;
}

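/* Tear down what amdgpu_vcn_sw_init() and the ring setup created: the saved
 * image, the VCPU BO, the decode/encode/JPEG rings and the firmware.
 */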
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kvfree(adev->vcn.saved_bo);

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	amdgpu_ring_fini(&adev->vcn.ring_jpeg);

	release_firmware(adev->vcn.fw);

	return 0;
}

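/* Save the VCPU BO contents to system memory so they can be restored after
 * the device is powered down; the idle work is cancelled first.
 */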
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}

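/* Restore the VCPU BO: either from the copy saved at suspend time, or by
 * re-writing the firmware image and clearing the rest of the buffer.
 */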
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kvfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
				    le32_to_cpu(hdr->ucode_size_bytes));
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		memset_io(ptr, 0, size);
	}

	return 0;
}

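/* Idle handler: once no fences are outstanding on the decode, encode and JPEG
 * rings, allow gfxoff and power-gate VCN (through DPM when it is enabled);
 * otherwise re-arm the timer.
 */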
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
	unsigned i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
	}

	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

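/* Called before work is pushed to a VCN ring: cancel the pending idle work
 * and, if none was pending, disable gfxoff and ungate/power up the block.
 * amdgpu_vcn_ring_end_use() below re-arms the idle timer.
 */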
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}
}

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

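/* Decode ring smoke test: seed UVD_CONTEXT_ID with 0xCAFEDEAD, write
 * 0xDEADBEEF to it through the ring and poll until the value lands.
 */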
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

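/* Submit a decoder message BO directly on the decode ring via a small IB that
 * points GPCOM_VCPU_DATA0/1 at the buffer. Consumes the reserved BO reference
 * and optionally returns the submission fence.
 */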
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

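/* Build the minimal decoder "create session" and "destroy session" messages
 * in a freshly allocated VRAM BO and hand them to amdgpu_vcn_dec_send_msg().
 */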
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

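/* Decode IB test: send a create-session message followed by a destroy-session
 * message and wait on the destroy fence with the caller-supplied timeout.
 */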
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

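/* Encode ring smoke test: submit a single VCN_ENC_CMD_END and wait for the
 * read pointer to move past it.
 */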
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

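/* Build a minimal encoder "create session" IB (session info, task info and an
 * op-initialize command) and submit it directly on the encode ring.
 */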
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

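/* Counterpart of the create message: the same session/task info header
 * followed by an op close-session command for the given handle.
 */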
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

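/* Encode IB test: open and immediately close an encoder session, then wait on
 * the close-session fence with the caller-supplied timeout.
 */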
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}

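/* JPEG ring smoke test: same idea as the decode ring test, but the register
 * write is carried by a PACKETJ type-0 packet.
 */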
int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);

	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0, 0, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}

	return r;
}

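/* Submit an IB on the JPEG ring that writes a test pattern into
 * UVD_JPEG_PITCH; the IB test below reads the register back to verify it.
 */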
static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	const unsigned ib_size_dw = 16;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH), 0, 0, PACKETJ_TYPE0);
	ib->ptr[1] = 0xDEADBEEF;
	for (i = 2; i < 16; i += 2) {
		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

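/* JPEG IB test: submit the register-write IB above, wait for its fence and
 * poll UVD_JPEG_PITCH until the test pattern shows up.
 */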
int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	struct dma_fence *fence = NULL;
	long r = 0;

	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto error;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto error;
	} else {
		r = 0;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
	} else {
		DRM_ERROR("ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}

error:
	dma_fence_put(fence);
	return r;
}