Commit | Line | Data |
---|---|---|
d93f7937 CK |
1 | /* |
2 | * Copyright 2013 Advanced Micro Devices, Inc. | |
3 | * All Rights Reserved. | |
4 | * | |
5 | * Permission is hereby granted, free of charge, to any person obtaining a | |
6 | * copy of this software and associated documentation files (the | |
7 | * "Software"), to deal in the Software without restriction, including | |
8 | * without limitation the rights to use, copy, modify, merge, publish, | |
9 | * distribute, sub license, and/or sell copies of the Software, and to | |
10 | * permit persons to whom the Software is furnished to do so, subject to | |
11 | * the following conditions: | |
12 | * | |
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |
20 | * | |
21 | * The above copyright notice and this permission notice (including the | |
22 | * next paragraph) shall be included in all copies or substantial portions | |
23 | * of the Software. | |
24 | * | |
25 | * Authors: Christian König <christian.koenig@amd.com> | |
26 | */ | |
27 | ||
28 | #include <linux/firmware.h> | |
29 | #include <linux/module.h> | |
30 | #include <drm/drmP.h> | |
31 | #include <drm/drm.h> | |
32 | ||
33 | #include "radeon.h" | |
34 | #include "radeon_asic.h" | |
35 | #include "sid.h" | |
36 | ||
03afe6f6 AD |
37 | /* 1 second timeout */ |
38 | #define VCE_IDLE_TIMEOUT_MS 1000 | |
39 | ||
d93f7937 CK |
40 | /* Firmware Names */ |
41 | #define FIRMWARE_BONAIRE "radeon/BONAIRE_vce.bin" | |
42 | ||
43 | MODULE_FIRMWARE(FIRMWARE_BONAIRE); | |
44 | ||
03afe6f6 AD |
45 | static void radeon_vce_idle_work_handler(struct work_struct *work); |
46 | ||
d93f7937 CK |
47 | /** |
48 | * radeon_vce_init - allocate memory, load vce firmware | |
49 | * | |
50 | * @rdev: radeon_device pointer | |
51 | * | |
52 | * First step to get VCE online, allocate memory and load the firmware | |
53 | */ | |
54 | int radeon_vce_init(struct radeon_device *rdev) | |
55 | { | |
98ccc291 CK |
56 | static const char *fw_version = "[ATI LIB=VCEFW,"; |
57 | static const char *fb_version = "[ATI LIB=VCEFWSTATS,"; | |
58 | unsigned long size; | |
59 | const char *fw_name, *c; | |
60 | uint8_t start, mid, end; | |
d93f7937 CK |
61 | int i, r; |
62 | ||
03afe6f6 AD |
63 | INIT_DELAYED_WORK(&rdev->vce.idle_work, radeon_vce_idle_work_handler); |
64 | ||
d93f7937 CK |
65 | switch (rdev->family) { |
66 | case CHIP_BONAIRE: | |
67 | case CHIP_KAVERI: | |
68 | case CHIP_KABINI: | |
d71c48f6 | 69 | case CHIP_HAWAII: |
428beddd | 70 | case CHIP_MULLINS: |
d93f7937 CK |
71 | fw_name = FIRMWARE_BONAIRE; |
72 | break; | |
73 | ||
74 | default: | |
75 | return -EINVAL; | |
76 | } | |
77 | ||
78 | r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev); | |
79 | if (r) { | |
80 | dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n", | |
81 | fw_name); | |
82 | return r; | |
83 | } | |
84 | ||
98ccc291 CK |
85 | /* search for firmware version */ |
86 | ||
87 | size = rdev->vce_fw->size - strlen(fw_version) - 9; | |
88 | c = rdev->vce_fw->data; | |
89 | for (;size > 0; --size, ++c) | |
90 | if (strncmp(c, fw_version, strlen(fw_version)) == 0) | |
91 | break; | |
92 | ||
93 | if (size == 0) | |
94 | return -EINVAL; | |
95 | ||
96 | c += strlen(fw_version); | |
97 | if (sscanf(c, "%2hhd.%2hhd.%2hhd]", &start, &mid, &end) != 3) | |
98 | return -EINVAL; | |
99 | ||
100 | /* search for feedback version */ | |
101 | ||
102 | size = rdev->vce_fw->size - strlen(fb_version) - 3; | |
103 | c = rdev->vce_fw->data; | |
104 | for (;size > 0; --size, ++c) | |
105 | if (strncmp(c, fb_version, strlen(fb_version)) == 0) | |
106 | break; | |
107 | ||
108 | if (size == 0) | |
109 | return -EINVAL; | |
110 | ||
111 | c += strlen(fb_version); | |
112 | if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1) | |
113 | return -EINVAL; | |
114 | ||
115 | DRM_INFO("Found VCE firmware/feedback version %hhd.%hhd.%hhd / %d!\n", | |
116 | start, mid, end, rdev->vce.fb_version); | |
117 | ||
118 | rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8); | |
119 | ||
120 | /* we can only work with this fw version for now */ | |
c11d75c8 CK |
121 | if ((rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8))) && |
122 | (rdev->vce.fw_version != ((50 << 24) | (0 << 16) | (1 << 8))) && | |
123 | (rdev->vce.fw_version != ((50 << 24) | (1 << 16) | (2 << 8)))) | |
98ccc291 CK |
124 | return -EINVAL; |
125 | ||
b03b4e4b | 126 | /* allocate firmware, stack and heap BO */ |
98ccc291 | 127 | |
fa0cf2f2 | 128 | size = vce_v2_0_bo_size(rdev); |
98ccc291 | 129 | r = radeon_bo_create(rdev, size, PAGE_SIZE, true, |
831b6966 ML |
130 | RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, |
131 | &rdev->vce.vcpu_bo); | |
d93f7937 CK |
132 | if (r) { |
133 | dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r); | |
134 | return r; | |
135 | } | |
136 | ||
b03b4e4b CK |
137 | r = radeon_bo_reserve(rdev->vce.vcpu_bo, false); |
138 | if (r) { | |
139 | radeon_bo_unref(&rdev->vce.vcpu_bo); | |
140 | dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r); | |
d93f7937 | 141 | return r; |
b03b4e4b | 142 | } |
d93f7937 | 143 | |
b03b4e4b CK |
144 | r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, |
145 | &rdev->vce.gpu_addr); | |
146 | radeon_bo_unreserve(rdev->vce.vcpu_bo); | |
147 | if (r) { | |
148 | radeon_bo_unref(&rdev->vce.vcpu_bo); | |
149 | dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r); | |
d93f7937 | 150 | return r; |
b03b4e4b | 151 | } |
d93f7937 CK |
152 | |
153 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { | |
154 | atomic_set(&rdev->vce.handles[i], 0); | |
155 | rdev->vce.filp[i] = NULL; | |
156 | } | |
157 | ||
158 | return 0; | |
159 | } | |
160 | ||
161 | /** | |
162 | * radeon_vce_fini - free memory | |
163 | * | |
164 | * @rdev: radeon_device pointer | |
165 | * | |
166 | * Last step on VCE teardown, free firmware memory | |
167 | */ | |
168 | void radeon_vce_fini(struct radeon_device *rdev) | |
169 | { | |
b03b4e4b CK |
170 | if (rdev->vce.vcpu_bo == NULL) |
171 | return; | |
172 | ||
d93f7937 | 173 | radeon_bo_unref(&rdev->vce.vcpu_bo); |
b03b4e4b CK |
174 | |
175 | release_firmware(rdev->vce_fw); | |
d93f7937 CK |
176 | } |
177 | ||
178 | /** | |
179 | * radeon_vce_suspend - unpin VCE fw memory | |
180 | * | |
181 | * @rdev: radeon_device pointer | |
182 | * | |
d93f7937 CK |
183 | */ |
184 | int radeon_vce_suspend(struct radeon_device *rdev) | |
185 | { | |
b03b4e4b | 186 | int i; |
d93f7937 CK |
187 | |
188 | if (rdev->vce.vcpu_bo == NULL) | |
189 | return 0; | |
190 | ||
b03b4e4b CK |
191 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) |
192 | if (atomic_read(&rdev->vce.handles[i])) | |
193 | break; | |
194 | ||
195 | if (i == RADEON_MAX_VCE_HANDLES) | |
196 | return 0; | |
197 | ||
198 | /* TODO: suspending running encoding sessions isn't supported */ | |
199 | return -EINVAL; | |
d93f7937 CK |
200 | } |
201 | ||
202 | /** | |
203 | * radeon_vce_resume - pin VCE fw memory | |
204 | * | |
205 | * @rdev: radeon_device pointer | |
206 | * | |
d93f7937 CK |
207 | */ |
208 | int radeon_vce_resume(struct radeon_device *rdev) | |
209 | { | |
b03b4e4b | 210 | void *cpu_addr; |
d93f7937 CK |
211 | int r; |
212 | ||
213 | if (rdev->vce.vcpu_bo == NULL) | |
214 | return -EINVAL; | |
215 | ||
216 | r = radeon_bo_reserve(rdev->vce.vcpu_bo, false); | |
217 | if (r) { | |
d93f7937 CK |
218 | dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r); |
219 | return r; | |
220 | } | |
221 | ||
b03b4e4b | 222 | r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr); |
d93f7937 CK |
223 | if (r) { |
224 | radeon_bo_unreserve(rdev->vce.vcpu_bo); | |
d93f7937 CK |
225 | dev_err(rdev->dev, "(%d) VCE map failed\n", r); |
226 | return r; | |
227 | } | |
228 | ||
b03b4e4b CK |
229 | memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size); |
230 | ||
231 | radeon_bo_kunmap(rdev->vce.vcpu_bo); | |
232 | ||
d93f7937 CK |
233 | radeon_bo_unreserve(rdev->vce.vcpu_bo); |
234 | ||
235 | return 0; | |
236 | } | |
237 | ||
03afe6f6 AD |
238 | /** |
239 | * radeon_vce_idle_work_handler - power off VCE | |
240 | * | |
241 | * @work: pointer to work structure | |
242 | * | |
243 | * power of VCE when it's not used any more | |
244 | */ | |
245 | static void radeon_vce_idle_work_handler(struct work_struct *work) | |
246 | { | |
247 | struct radeon_device *rdev = | |
248 | container_of(work, struct radeon_device, vce.idle_work.work); | |
249 | ||
250 | if ((radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE1_INDEX) == 0) && | |
251 | (radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE2_INDEX) == 0)) { | |
252 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { | |
253 | radeon_dpm_enable_vce(rdev, false); | |
254 | } else { | |
255 | radeon_set_vce_clocks(rdev, 0, 0); | |
256 | } | |
257 | } else { | |
258 | schedule_delayed_work(&rdev->vce.idle_work, | |
259 | msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); | |
260 | } | |
261 | } | |
262 | ||
263 | /** | |
264 | * radeon_vce_note_usage - power up VCE | |
265 | * | |
266 | * @rdev: radeon_device pointer | |
267 | * | |
268 | * Make sure VCE is powerd up when we want to use it | |
269 | */ | |
270 | void radeon_vce_note_usage(struct radeon_device *rdev) | |
271 | { | |
272 | bool streams_changed = false; | |
273 | bool set_clocks = !cancel_delayed_work_sync(&rdev->vce.idle_work); | |
274 | set_clocks &= schedule_delayed_work(&rdev->vce.idle_work, | |
275 | msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); | |
276 | ||
277 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { | |
278 | /* XXX figure out if the streams changed */ | |
279 | streams_changed = false; | |
280 | } | |
281 | ||
282 | if (set_clocks || streams_changed) { | |
283 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { | |
284 | radeon_dpm_enable_vce(rdev, true); | |
285 | } else { | |
286 | radeon_set_vce_clocks(rdev, 53300, 40000); | |
287 | } | |
288 | } | |
289 | } | |
290 | ||
d93f7937 CK |
291 | /** |
292 | * radeon_vce_free_handles - free still open VCE handles | |
293 | * | |
294 | * @rdev: radeon_device pointer | |
295 | * @filp: drm file pointer | |
296 | * | |
297 | * Close all VCE handles still open by this file pointer | |
298 | */ | |
299 | void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp) | |
300 | { | |
301 | int i, r; | |
302 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { | |
303 | uint32_t handle = atomic_read(&rdev->vce.handles[i]); | |
304 | if (!handle || rdev->vce.filp[i] != filp) | |
305 | continue; | |
306 | ||
03afe6f6 AD |
307 | radeon_vce_note_usage(rdev); |
308 | ||
d93f7937 CK |
309 | r = radeon_vce_get_destroy_msg(rdev, TN_RING_TYPE_VCE1_INDEX, |
310 | handle, NULL); | |
311 | if (r) | |
312 | DRM_ERROR("Error destroying VCE handle (%d)!\n", r); | |
313 | ||
314 | rdev->vce.filp[i] = NULL; | |
315 | atomic_set(&rdev->vce.handles[i], 0); | |
316 | } | |
317 | } | |
318 | ||
/**
 * radeon_vce_get_create_msg - generate a VCE create msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test. Builds a session + create + feedback
 * command sequence in an IB and schedules it; the exact dword values
 * form the VCE firmware's message format and must not be altered.
 *
 * Returns 0 on success, negative error code on failure.
 */
int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	/* use scratch space inside the IB itself as the feedback buffer */
	dummy = ib.gpu_addr + 1024;

	/* stitch together an VCE create msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000030; /* len */
	ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
	ib.ptr[ib.length_dw++] = 0x00000000;
	ib.ptr[ib.length_dw++] = 0x00000042;
	ib.ptr[ib.length_dw++] = 0x0000000a;
	ib.ptr[ib.length_dw++] = 0x00000001;
	ib.ptr[ib.length_dw++] = 0x00000080;
	ib.ptr[ib.length_dw++] = 0x00000060;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x0000000c;
	ib.ptr[ib.length_dw++] = 0x00000000;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = dummy;
	ib.ptr[ib.length_dw++] = 0x00000001;

	/* pad the rest of the IB with NOP dwords */
	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = 0x0;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
	}

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);

	return r;
}
385 | ||
/**
 * radeon_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so. Builds
 * a session + feedback + destroy command sequence in an IB; the dword
 * values form the VCE firmware's message format and must not be altered.
 *
 * Returns 0 on success, negative error code on failure.
 */
int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	/* use scratch space inside the IB itself as the feedback buffer */
	dummy = ib.gpu_addr + 1024;

	/* stitch together an VCE destroy msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = dummy;
	ib.ptr[ib.length_dw++] = 0x00000001;

	ib.ptr[ib.length_dw++] = 0x00000008; /* len */
	ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */

	/* pad the rest of the IB with NOP dwords */
	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = 0x0;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
	}

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);

	return r;
}
442 | ||
/**
 * radeon_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size the relocated buffer must provide past the offset
 *
 * Patch relocation inside command stream with real buffer address.
 *
 * Returns 0 on success, -EINVAL when the reloc index, offset or buffer
 * size is invalid. Note the IB is patched before the range checks; on
 * failure the whole CS is rejected anyway, so the patched value is
 * never executed.
 */
int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
			unsigned size)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_bo_list *reloc;
	uint64_t start, end, offset;
	unsigned idx;

	relocs_chunk = p->chunk_relocs;
	offset = radeon_get_ib_value(p, lo);
	idx = radeon_get_ib_value(p, hi);

	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}

	/* idx counts dwords; presumably each reloc entry is 4 dwords wide,
	 * hence idx / 4 — TODO confirm against the reloc chunk layout */
	reloc = &p->relocs[(idx / 4)];
	start = reloc->gpu_offset;
	end = start + radeon_bo_size(reloc->robj);
	start += offset;

	/* patch the command stream with the real GPU address */
	p->ib.ptr[lo] = start & 0xFFFFFFFF;
	p->ib.ptr[hi] = start >> 32;

	/* offset pushed start past the end of the BO */
	if (end <= start) {
		DRM_ERROR("invalid reloc offset %llX!\n", offset);
		return -EINVAL;
	}
	if ((end - start) < size) {
		DRM_ERROR("buffer to small (%d / %d)!\n",
			  (unsigned)(end - start), size);
		return -EINVAL;
	}

	return 0;
}
491 | ||
2fc5703a LL |
/**
 * radeon_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index, or -EINVAL
 * when we don't have another free session index or the handle belongs
 * to a different file descriptor.
 */
static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
				      uint32_t handle, bool *allocated)
{
	unsigned i;

	*allocated = false;

	/* validate the handle */
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->rdev->vce.handles[i]) == handle) {
			/* a handle may only be used by the filp that created it */
			if (p->rdev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found try to alloc a new one */
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		/* cmpxchg claims the slot atomically; 0 means it was free */
		if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
			p->rdev->vce.filp[i] = p->filp;
			p->rdev->vce.img_size[i] = 0;
			*allocated = true;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}
533 | ||
d93f7937 CK |
/**
 * radeon_vce_cs_parse - parse and validate the command stream
 *
 * @p: parser context
 *
 * Walks the IB command by command, validating lengths, session state
 * and buffer relocations. Tracks whether this submission allocated a
 * new session handle so it can be freed again on error or on an
 * explicit destroy command.
 *
 * Returns 0 on success, negative error code on invalid streams.
 */
int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
	int session_idx = -1;
	bool destroyed = false, created = false, allocated = false;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0;

	while (p->idx < p->chunk_ib->length_dw) {
		uint32_t len = radeon_get_ib_value(p, p->idx);
		uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);

		/* commands are at least 8 bytes and dword aligned */
		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		if (destroyed) {
			DRM_ERROR("No other command allowed after destroy!\n");
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: // session
			handle = radeon_get_ib_value(p, p->idx + 2);
			session_idx = radeon_vce_validate_handle(p, handle,
								 &allocated);
			/* validate failed => allocated is false, nothing to
			 * clean up, so a direct return is fine here */
			if (session_idx < 0)
				return session_idx;
			size = &p->rdev->vce.img_size[session_idx];
			break;

		case 0x00000002: // task info
			break;

		case 0x01000001: // create
			created = true;
			/* create is only valid on a freshly allocated handle */
			if (!allocated) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			/* remember the image size for later buffer checks */
			*size = radeon_get_ib_value(p, p->idx + 8) *
				radeon_get_ib_value(p, p->idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
		case 0x04000009: // vui
			break;

		case 0x03000001: // encode
			r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
						*size);
			if (r)
				goto out;

			r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
						*size / 3);
			if (r)
				goto out;
			break;

		case 0x02000001: // destroy
			destroyed = true;
			break;

		case 0x05000001: // context buffer
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
						*size * 2);
			if (r)
				goto out;
			break;

		case 0x05000004: // video bitstream buffer
			tmp = radeon_get_ib_value(p, p->idx + 4);
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
						tmp);
			if (r)
				goto out;
			break;

		case 0x05000005: // feedback buffer
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
						4096);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		/* every IB must start with a session command */
		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		p->idx += len / 4;
	}

	if (allocated && !created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if ((!r && destroyed) || (r && allocated)) {
		/*
		 * IB contains a destroy msg or we have allocated an
		 * handle and got an error, anyway free the handle
		 */
		for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
	}

	return r;
}
668 | ||
669 | /** | |
670 | * radeon_vce_semaphore_emit - emit a semaphore command | |
671 | * | |
672 | * @rdev: radeon_device pointer | |
673 | * @ring: engine to use | |
674 | * @semaphore: address of semaphore | |
675 | * @emit_wait: true=emit wait, false=emit signal | |
676 | * | |
677 | */ | |
678 | bool radeon_vce_semaphore_emit(struct radeon_device *rdev, | |
679 | struct radeon_ring *ring, | |
680 | struct radeon_semaphore *semaphore, | |
681 | bool emit_wait) | |
682 | { | |
683 | uint64_t addr = semaphore->gpu_addr; | |
684 | ||
685 | radeon_ring_write(ring, VCE_CMD_SEMAPHORE); | |
686 | radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); | |
687 | radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); | |
688 | radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0)); | |
689 | if (!emit_wait) | |
690 | radeon_ring_write(ring, VCE_CMD_END); | |
691 | ||
692 | return true; | |
693 | } | |
694 | ||
695 | /** | |
696 | * radeon_vce_ib_execute - execute indirect buffer | |
697 | * | |
698 | * @rdev: radeon_device pointer | |
699 | * @ib: the IB to execute | |
700 | * | |
701 | */ | |
702 | void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | |
703 | { | |
704 | struct radeon_ring *ring = &rdev->ring[ib->ring]; | |
705 | radeon_ring_write(ring, VCE_CMD_IB); | |
706 | radeon_ring_write(ring, ib->gpu_addr); | |
707 | radeon_ring_write(ring, upper_32_bits(ib->gpu_addr)); | |
708 | radeon_ring_write(ring, ib->length_dw); | |
709 | } | |
710 | ||
711 | /** | |
712 | * radeon_vce_fence_emit - add a fence command to the ring | |
713 | * | |
714 | * @rdev: radeon_device pointer | |
715 | * @fence: the fence | |
716 | * | |
717 | */ | |
718 | void radeon_vce_fence_emit(struct radeon_device *rdev, | |
719 | struct radeon_fence *fence) | |
720 | { | |
721 | struct radeon_ring *ring = &rdev->ring[fence->ring]; | |
681941c1 | 722 | uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; |
d93f7937 CK |
723 | |
724 | radeon_ring_write(ring, VCE_CMD_FENCE); | |
725 | radeon_ring_write(ring, addr); | |
726 | radeon_ring_write(ring, upper_32_bits(addr)); | |
727 | radeon_ring_write(ring, fence->seq); | |
728 | radeon_ring_write(ring, VCE_CMD_TRAP); | |
729 | radeon_ring_write(ring, VCE_CMD_END); | |
730 | } | |
731 | ||
732 | /** | |
733 | * radeon_vce_ring_test - test if VCE ring is working | |
734 | * | |
735 | * @rdev: radeon_device pointer | |
736 | * @ring: the engine to test on | |
737 | * | |
738 | */ | |
739 | int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) | |
740 | { | |
741 | uint32_t rptr = vce_v1_0_get_rptr(rdev, ring); | |
742 | unsigned i; | |
743 | int r; | |
744 | ||
745 | r = radeon_ring_lock(rdev, ring, 16); | |
746 | if (r) { | |
747 | DRM_ERROR("radeon: vce failed to lock ring %d (%d).\n", | |
748 | ring->idx, r); | |
749 | return r; | |
750 | } | |
751 | radeon_ring_write(ring, VCE_CMD_END); | |
1538a9e0 | 752 | radeon_ring_unlock_commit(rdev, ring, false); |
d93f7937 CK |
753 | |
754 | for (i = 0; i < rdev->usec_timeout; i++) { | |
755 | if (vce_v1_0_get_rptr(rdev, ring) != rptr) | |
756 | break; | |
757 | DRM_UDELAY(1); | |
758 | } | |
759 | ||
760 | if (i < rdev->usec_timeout) { | |
761 | DRM_INFO("ring test on %d succeeded in %d usecs\n", | |
762 | ring->idx, i); | |
763 | } else { | |
764 | DRM_ERROR("radeon: ring %d test failed\n", | |
765 | ring->idx); | |
766 | r = -ETIMEDOUT; | |
767 | } | |
768 | ||
769 | return r; | |
770 | } | |
771 | ||
772 | /** | |
773 | * radeon_vce_ib_test - test if VCE IBs are working | |
774 | * | |
775 | * @rdev: radeon_device pointer | |
776 | * @ring: the engine to test on | |
777 | * | |
778 | */ | |
779 | int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) | |
780 | { | |
781 | struct radeon_fence *fence = NULL; | |
782 | int r; | |
783 | ||
784 | r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL); | |
785 | if (r) { | |
786 | DRM_ERROR("radeon: failed to get create msg (%d).\n", r); | |
787 | goto error; | |
788 | } | |
789 | ||
790 | r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence); | |
791 | if (r) { | |
792 | DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r); | |
793 | goto error; | |
794 | } | |
795 | ||
796 | r = radeon_fence_wait(fence, false); | |
797 | if (r) { | |
798 | DRM_ERROR("radeon: fence wait failed (%d).\n", r); | |
799 | } else { | |
800 | DRM_INFO("ib test on ring %d succeeded\n", ring->idx); | |
801 | } | |
802 | error: | |
803 | radeon_fence_unref(&fence); | |
804 | return r; | |
805 | } |