/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

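/**
 * amdgpu_vcn_sw_init - initialize the VCN software state
 * @adev: amdgpu_device pointer
 *
 * Request and validate the VCN firmware for the current ASIC, report its
 * version, and allocate the VRAM buffer object that backs the VCPU
 * (firmware image plus stack, heap and session areas).
 */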
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		fw_name = FIRMWARE_RAVEN;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 hold the encode major version and are non-zero in the
	 * new naming convention. In the old naming convention this field is
	 * part of the version minor and DRM_DISABLED_FLAG. Since the latest
	 * version minor is 0x5B and DRM_DISABLED_FLAG is zero in the old
	 * convention, this field has always been zero so far, so these four
	 * bits are used to tell the two naming conventions apart.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			 enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
			 version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
		  + AMDGPU_VCN_SESSION_SIZE * 40;
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	return 0;
}

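/**
 * amdgpu_vcn_sw_fini - tear down the VCN software state
 * @adev: amdgpu_device pointer
 *
 * Free the saved suspend image, the VCPU buffer object, the decode, encode
 * and JPEG rings, and release the firmware.
 */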
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kfree(adev->vcn.saved_bo);

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	amdgpu_ring_fini(&adev->vcn.ring_jpeg);

	release_firmware(adev->vcn.fw);

	return 0;
}

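/**
 * amdgpu_vcn_suspend - save VCN state for suspend
 * @adev: amdgpu_device pointer
 *
 * Cancel any pending idle work, then copy the contents of the VCPU buffer
 * object into a kernel allocation so amdgpu_vcn_resume() can restore it.
 */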
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}

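/**
 * amdgpu_vcn_resume - restore VCN state on resume
 * @adev: amdgpu_device pointer
 *
 * Copy the saved image back into the VCPU buffer object; if no saved image
 * exists, reload the raw firmware image and zero the rest of the buffer.
 */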
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
		memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
			    le32_to_cpu(hdr->ucode_size_bytes));
		size -= le32_to_cpu(hdr->ucode_size_bytes);
		ptr += le32_to_cpu(hdr->ucode_size_bytes);
		memset_io(ptr, 0, size);
	}

	return 0;
}

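/* Idle worker: once no fences remain outstanding on the decode, encode and
 * JPEG rings, power the block down (through DPM if enabled, otherwise by
 * gating the IP block); while work remains, re-check after VCN_IDLE_TIMEOUT.
 */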
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
	unsigned i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);

	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);

	if (fences == 0) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

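/* Called before command submission: if the idle work was not pending, the
 * block may already be powered down, so bring it back up first.
 */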
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}
}

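/* Called after command submission: re-arm the idle work so the block is
 * powered down again after VCN_IDLE_TIMEOUT of inactivity.
 */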
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

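/**
 * amdgpu_vcn_dec_ring_test_ring - basic decode ring liveness test
 * @ring: the decode ring to test
 *
 * Seed mmUVD_CONTEXT_ID with 0xCAFEDEAD, submit a register write of
 * 0xDEADBEEF through the ring, and poll the register until the new value
 * lands or adev->usec_timeout expires.
 */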
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

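/* Submit a decoder message buffer: build a small IB that points the VCPU at
 * @bo, schedule it, fence the BO against the submission and drop the local
 * BO references. On success the submission fence is optionally returned
 * through @fence.
 */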
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err_free;

	amdgpu_job_free(job);

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

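/* Write a decoder create-session message for @handle into a freshly reserved
 * VRAM BO and send it. The payload follows the firmware's message layout;
 * 0x780 x 0x440 appears to describe a 1920x1088 surface.
 */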
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

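/* Write and send the matching decoder destroy-session message for @handle. */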
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

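/**
 * amdgpu_vcn_dec_ring_test_ib - decode ring IB test
 * @ring: the decode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Send a create-session message followed by a destroy-session message and
 * wait for the destroy message's fence to signal.
 */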
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

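/**
 * amdgpu_vcn_enc_ring_test_ring - basic encode ring liveness test
 * @ring: the encode ring to test
 *
 * Submit a single END command and poll until the firmware moves the read
 * pointer past it, or adev->usec_timeout expires.
 */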
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

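/* Build an encoder open-session IB for @handle (session info, task info and
 * an initialize op) and schedule it, optionally returning the fence.
 */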
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

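/* Build and schedule the matching encoder close-session IB for @handle. */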
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

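/**
 * amdgpu_vcn_enc_ring_test_ib - encode ring IB test
 * @ring: the encode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Open and close an encoder session through real IBs and wait for the fence
 * of the close-session IB to signal.
 */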
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}

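/**
 * amdgpu_vcn_jpeg_ring_test_ring - basic JPEG ring liveness test
 * @ring: the JPEG ring to test
 *
 * Same scheme as the decode ring test, but the register write goes through
 * a PACKETJ packet.
 */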
int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0, 0, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}

	return r;
}

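/* Schedule an IB that writes 0xDEADBEEF to mmUVD_JPEG_PITCH via a PACKETJ
 * type-0 write, padded with type-6 packets (used here as no-ops); the IB
 * test below polls for that value.
 */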
static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	const unsigned ib_size_dw = 16;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH), 0, 0, PACKETJ_TYPE0);
	ib->ptr[1] = 0xDEADBEEF;
	for (i = 2; i < 16; i += 2) {
		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

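/**
 * amdgpu_vcn_jpeg_ring_test_ib - JPEG ring IB test
 * @ring: the JPEG ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Schedule the register-write IB above, wait for its fence, then poll
 * mmUVD_JPEG_PITCH until the written value becomes visible.
 */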
int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	struct dma_fence *fence = NULL;
	long r = 0;

	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto error;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto error;
	} else {
		r = 0;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
	} else {
		DRM_ERROR("ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}

error:
	/* dma_fence_put() is NULL-safe, so all paths can share this put and
	 * the fence is no longer leaked when the wait times out or fails.
	 */
	dma_fence_put(fence);
	return r;
}